author     Len Brown <len.brown@intel.com>  2008-10-22 23:57:26 -0400
committer  Len Brown <len.brown@intel.com>  2008-10-23 00:11:07 -0400
commit     057316cc6a5b521b332a1d7ccc871cd60c904c74 (patch)
tree       4333e608da237c73ff69b10878025cca96dcb4c8 /arch/x86/kernel
parent     3e2dab9a1c2deb03c311eb3f83466009147ed4d3 (diff)
parent     2515ddc6db8eb49a79f0fe5e67ff09ac7c81eab4 (diff)
Merge branch 'linus' into test

Conflicts:
	MAINTAINERS
	arch/x86/kernel/acpi/boot.c
	arch/x86/kernel/acpi/sleep.c
	drivers/acpi/Kconfig
	drivers/pnp/Makefile
	drivers/pnp/quirks.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/Makefile | 23
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 26
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 3
-rw-r--r--  arch/x86/kernel/alternative.c | 10
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 337
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 194
-rw-r--r--  arch/x86/kernel/aperture_64.c | 6
-rw-r--r--  arch/x86/kernel/apic.c (renamed from arch/x86/kernel/apic_32.c) | 1038
-rw-r--r--  arch/x86/kernel/apic_64.c | 1390
-rw-r--r--  arch/x86/kernel/apm_32.c | 1
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c | 2
-rw-r--r--  arch/x86/kernel/bios_uv.c | 137
-rw-r--r--  arch/x86/kernel/cpu/.gitignore | 1
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 34
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c | 88
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 548
-rw-r--r--  arch/x86/kernel/cpu/amd_64.c | 224
-rw-r--r--  arch/x86/kernel/cpu/centaur.c | 4
-rw-r--r--  arch/x86/kernel/cpu/centaur_64.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cmpxchg.c | 72
-rw-r--r--  arch/x86/kernel/cpu/common.c | 1001
-rw-r--r--  arch/x86/kernel/cpu/common_64.c | 712
-rw-r--r--  arch/x86/kernel/cpu/cpu.h | 19
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 13
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/elanfreq.c | 42
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k6.c | 41
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c | 23
-rw-r--r--  arch/x86/kernel/cpu/feature_names.c | 84
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 365
-rw-r--r--  arch/x86/kernel/cpu/intel_64.c | 95
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 169
-rw-r--r--  arch/x86/kernel/cpu/mcheck/k7.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_32.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/non-fatal.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mkcapflags.pl | 32
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 7
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c | 274
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 97
-rw-r--r--  arch/x86/kernel/cpu/powerflags.c | 20
-rw-r--r--  arch/x86/kernel/cpu/transmeta.c | 32
-rw-r--r--  arch/x86/kernel/cpu/umc.c | 3
-rw-r--r--  arch/x86/kernel/cpuid.c | 5
-rw-r--r--  arch/x86/kernel/crash_dump_32.c | 3
-rw-r--r--  arch/x86/kernel/crash_dump_64.c | 14
-rw-r--r--  arch/x86/kernel/doublefault_32.c | 2
-rw-r--r--  arch/x86/kernel/ds.c | 954
-rw-r--r--  arch/x86/kernel/dumpstack_32.c | 449
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 575
-rw-r--r--  arch/x86/kernel/e820.c | 32
-rw-r--r--  arch/x86/kernel/early-quirks.c | 66
-rw-r--r--  arch/x86/kernel/early_printk.c | 748
-rw-r--r--  arch/x86/kernel/efi.c | 10
-rw-r--r--  arch/x86/kernel/entry_32.S | 38
-rw-r--r--  arch/x86/kernel/entry_64.S | 45
-rw-r--r--  arch/x86/kernel/es7000_32.c | 363
-rw-r--r--  arch/x86/kernel/ftrace.c | 124
-rw-r--r--  arch/x86/kernel/genapic_64.c | 88
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c | 64
-rw-r--r--  arch/x86/kernel/genx2apic_cluster.c | 159
-rw-r--r--  arch/x86/kernel/genx2apic_phys.c | 154
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c | 138
-rw-r--r--  arch/x86/kernel/head.c | 1
-rw-r--r--  arch/x86/kernel/head64.c | 5
-rw-r--r--  arch/x86/kernel/head_32.S | 34
-rw-r--r--  arch/x86/kernel/head_64.S | 4
-rw-r--r--  arch/x86/kernel/hpet.c | 459
-rw-r--r--  arch/x86/kernel/i387.c | 168
-rw-r--r--  arch/x86/kernel/i8259.c | 24
-rw-r--r--  arch/x86/kernel/io_apic.c (renamed from arch/x86/kernel/io_apic_64.c) | 2071
-rw-r--r--  arch/x86/kernel/io_apic_32.c | 2901
-rw-r--r--  arch/x86/kernel/ioport.c | 1
-rw-r--r--  arch/x86/kernel/ipi.c | 3
-rw-r--r--  arch/x86/kernel/irq.c | 189
-rw-r--r--  arch/x86/kernel/irq_32.c | 194
-rw-r--r--  arch/x86/kernel/irq_64.c | 169
-rw-r--r--  arch/x86/kernel/irqinit_32.c | 84
-rw-r--r--  arch/x86/kernel/irqinit_64.c | 69
-rw-r--r--  arch/x86/kernel/k8.c | 5
-rw-r--r--  arch/x86/kernel/kvm.c | 2
-rw-r--r--  arch/x86/kernel/kvmclock.c | 30
-rw-r--r--  arch/x86/kernel/ldt.c | 10
-rw-r--r--  arch/x86/kernel/microcode.c | 853
-rw-r--r--  arch/x86/kernel/microcode_amd.c | 435
-rw-r--r--  arch/x86/kernel/microcode_core.c | 508
-rw-r--r--  arch/x86/kernel/microcode_intel.c | 480
-rw-r--r--  arch/x86/kernel/mpparse.c | 2
-rw-r--r--  arch/x86/kernel/msr.c | 4
-rw-r--r--  arch/x86/kernel/nmi.c | 11
-rw-r--r--  arch/x86/kernel/numaq_32.c | 7
-rw-r--r--  arch/x86/kernel/olpc.c | 6
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c | 37
-rw-r--r--  arch/x86/kernel/paravirt.c | 30
-rw-r--r--  arch/x86/kernel/paravirt_patch_32.c | 2
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 36
-rw-r--r--  arch/x86/kernel/pci-dma.c | 183
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 150
-rw-r--r--  arch/x86/kernel/pci-nommu.c | 10
-rw-r--r--  arch/x86/kernel/pcspeaker.c | 13
-rw-r--r--  arch/x86/kernel/process.c | 4
-rw-r--r--  arch/x86/kernel/process_32.c | 105
-rw-r--r--  arch/x86/kernel/process_64.c | 199
-rw-r--r--  arch/x86/kernel/ptrace.c | 522
-rw-r--r--  arch/x86/kernel/pvclock.c | 12
-rw-r--r--  arch/x86/kernel/quirks.c | 44
-rw-r--r--  arch/x86/kernel/reboot.c | 6
-rw-r--r--  arch/x86/kernel/rtc.c | 42
-rw-r--r--  arch/x86/kernel/setup.c | 236
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 26
-rw-r--r--  arch/x86/kernel/sigframe.h | 19
-rw-r--r--  arch/x86/kernel/signal_32.c | 273
-rw-r--r--  arch/x86/kernel/signal_64.c | 362
-rw-r--r--  arch/x86/kernel/smp.c | 6
-rw-r--r--  arch/x86/kernel/smpboot.c | 230
-rw-r--r--  arch/x86/kernel/summit_32.c | 2
-rw-r--r--  arch/x86/kernel/sys_i386_32.c | 2
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 44
-rw-r--r--  arch/x86/kernel/syscall_64.c | 4
-rw-r--r--  arch/x86/kernel/time_32.c | 8
-rw-r--r--  arch/x86/kernel/time_64.c | 23
-rw-r--r--  arch/x86/kernel/tlb_32.c | 8
-rw-r--r--  arch/x86/kernel/tls.c | 1
-rw-r--r--  arch/x86/kernel/traps.c (renamed from arch/x86/kernel/traps_32.c) | 896
-rw-r--r--  arch/x86/kernel/traps_64.c | 1212
-rw-r--r--  arch/x86/kernel/tsc.c | 290
-rw-r--r--  arch/x86/kernel/uv_irq.c | 79
-rw-r--r--  arch/x86/kernel/uv_sysfs.c | 72
-rw-r--r--  arch/x86/kernel/visws_quirks.c | 48
-rw-r--r--  arch/x86/kernel/vm86_32.c | 1
-rw-r--r--  arch/x86/kernel/vmi_32.c | 14
-rw-r--r--  arch/x86/kernel/vmiclock_32.c | 3
-rw-r--r--  arch/x86/kernel/vmlinux_32.lds.S | 9
-rw-r--r--  arch/x86/kernel/vmlinux_64.lds.S | 9
-rw-r--r--  arch/x86/kernel/xsave.c | 345
139 files changed, 13770 insertions, 11806 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3db651fc8ec5..d7e5a58ee22f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -10,7 +10,7 @@ ifdef CONFIG_FTRACE
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
-CFLAGS_REMOVE_paravirt.o = -pg
+CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 endif
 
 #
@@ -23,7 +23,7 @@ CFLAGS_hpet.o := $(nostackp)
 CFLAGS_tsc.o := $(nostackp)
 
 obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
-obj-y += traps_$(BITS).o irq_$(BITS).o
+obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y += time_$(BITS).o ioport.o ldt.o
 obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
 obj-$(CONFIG_X86_VISWS) += visws_quirks.o
@@ -38,7 +38,7 @@ obj-y += tsc.o io_delay.o rtc.o
 
 obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
 obj-y += process.o
-obj-y += i387.o
+obj-y += i387.o xsave.o
 obj-y += ptrace.o
 obj-y += ds.o
 obj-$(CONFIG_X86_32) += tls.o
@@ -51,7 +51,6 @@ obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
 obj-$(CONFIG_MCA) += mca_32.o
 obj-$(CONFIG_X86_MSR) += msr.o
 obj-$(CONFIG_X86_CPUID) += cpuid.o
-obj-$(CONFIG_MICROCODE) += microcode.o
 obj-$(CONFIG_PCI) += early-quirks.o
 apm-y := apm_32.o
 obj-$(CONFIG_APM) += apm.o
@@ -61,14 +60,15 @@ obj-$(CONFIG_X86_32_SMP) += smpcommon.o
 obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE) += mpparse.o
-obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi.o
-obj-$(CONFIG_X86_IO_APIC) += io_apic_$(BITS).o
+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
+obj-$(CONFIG_X86_IO_APIC) += io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
 obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
+obj-$(CONFIG_X86_ES7000) += es7000_32.o
 obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o
 obj-y += vsmp_64.o
 obj-$(CONFIG_KPROBES) += kprobes.o
@@ -89,7 +89,7 @@ obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
 obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST) += kvm.o
 obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
-obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
 
 obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
@@ -99,11 +99,18 @@ scx200-y += scx200_32.o
 
 obj-$(CONFIG_OLPC) += olpc.o
 
+microcode-y := microcode_core.o
+microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
+microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o
+obj-$(CONFIG_MICROCODE) += microcode.o
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
         obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
-        obj-y += bios_uv.o
+        obj-y += bios_uv.o uv_irq.o uv_sysfs.o
+        obj-y += genx2apic_cluster.o
+        obj-y += genx2apic_phys.o
         obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
         obj-$(CONFIG_AUDIT) += audit_64.o
 
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 0c2742f8c4da..53b01a1ae10c 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -58,7 +58,6 @@ EXPORT_SYMBOL(acpi_disabled);
 #ifdef CONFIG_X86_64
 
 #include <asm/proto.h>
-#include <asm/genapic.h>
 
 #else /* X86 */
 
@@ -97,7 +96,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #warning ACPI uses CMPXCHG, i486 and later hardware
 #endif
 
-
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
    -------------------------------------------------------------------------- */
@@ -255,10 +253,8 @@ static void __cpuinit acpi_register_lapic(int id, u8 enabled)
 		return;
 	}
 
-#ifdef CONFIG_X86_32
 	if (boot_cpu_physical_apicid != -1U)
 		ver = apic_version[boot_cpu_physical_apicid];
-#endif
 
 	generic_processor_info(id, ver);
 }
@@ -777,11 +773,9 @@ static void __init acpi_register_lapic_address(unsigned long address)
 
 	set_fixmap_nocache(FIX_APIC_BASE, address);
 	if (boot_cpu_physical_apicid == -1U) {
-		boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
-#ifdef CONFIG_X86_32
+		boot_cpu_physical_apicid = read_apic_id();
 		apic_version[boot_cpu_physical_apicid] =
 			 GET_APIC_VERSION(apic_read(APIC_LVR));
-#endif
 	}
 }
 
@@ -1263,7 +1257,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
 
 	count =
 	    acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
-				  NR_IRQ_VECTORS);
+				  nr_irqs);
 	if (count < 0) {
 		printk(KERN_ERR PREFIX
 		       "Error parsing interrupt source overrides entry\n");
@@ -1283,7 +1277,7 @@ ...
 
 	count =
 	    acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
-				  NR_IRQ_VECTORS);
+				  nr_irqs);
 	if (count < 0) {
 		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
 		/* TBD: Cleanup to allow fallback to MPS */
@@ -1353,7 +1347,9 @@ static void __init acpi_process_madt(void)
 			acpi_ioapic = 1;
 
 			smp_found_config = 1;
+#ifdef CONFIG_X86_32
 			setup_apic_routing();
+#endif
 		}
 	}
 	if (error == -EINVAL) {
@@ -1423,8 +1419,16 @@ static int __init force_acpi_ht(const struct dmi_system_id *d)
  */
 static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
 {
-	pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n", d->ident);
-	acpi_skip_timer_override = 1;
+	/*
+	 * The ati_ixp4x0_rev() early PCI quirk should have set
+	 * the acpi_skip_timer_override flag already:
+	 */
+	if (!acpi_skip_timer_override) {
+		WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
+		pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
+			  d->ident);
+		acpi_skip_timer_override = 1;
+	}
 	return 0;
 }
 
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 55d10cbe65b1..806b4e9051b4 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -10,6 +10,7 @@
 #include <linux/dmi.h>
 #include <linux/cpumask.h>
 #include <asm/segment.h>
+#include <asm/desc.h>
 
 #include "realmode/wakeup.h"
 #include "sleep.h"
@@ -98,6 +99,8 @@ int acpi_save_state_mem(void)
 	header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
 	stack_start.sp = temp_stack + sizeof(temp_stack);
+	early_gdt_descr.address =
+			(unsigned long)get_cpu_gdt_table(smp_processor_id());
 #endif
 	initial_code = (unsigned long)wakeup_long64;
 	saved_magic = 0x123456789abcdef0;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 65a0c1b48696..a84ac7b570e6 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -231,25 +231,25 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 			continue;
 		if (*ptr > text_end)
 			continue;
-		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
+		/* turn DS segment override prefix into lock prefix */
+		text_poke(*ptr, ((unsigned char []){0xf0}), 1);
 	};
 }
 
 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 {
 	u8 **ptr;
-	char insn[1];
 
 	if (noreplace_smp)
 		return;
 
-	add_nops(insn, 1);
 	for (ptr = start; ptr < end; ptr++) {
 		if (*ptr < text)
 			continue;
 		if (*ptr > text_end)
 			continue;
-		text_poke(*ptr, insn, 1);
+		/* turn lock prefix into DS segment override prefix */
+		text_poke(*ptr, ((unsigned char []){0x3E}), 1);
 	};
 }
 
@@ -444,7 +444,7 @@ void __init alternative_instructions(void)
 					    _text, _etext);
 
 		/* Only switch to UP mode if we don't immediately boot others */
-		if (num_possible_cpus() == 1 || setup_max_cpus <= 1)
+		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
 			alternatives_smp_switch(0);
 	}
 #endif
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 042fdc27bc92..a8fd9ebdc8e2 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -33,6 +33,10 @@
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
+/* A list of preallocated protection domains */
+static LIST_HEAD(iommu_pd_list);
+static DEFINE_SPINLOCK(iommu_pd_list_lock);
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -51,6 +55,102 @@ static int iommu_has_npcache(struct amd_iommu *iommu)
 
 /****************************************************************************
  *
+ * Interrupt handling functions
+ *
+ ****************************************************************************/
+
+static void iommu_print_event(void *__evt)
+{
+	u32 *event = __evt;
+	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
+	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+	int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+	u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+
+	printk(KERN_ERR "AMD IOMMU: Event logged [");
+
+	switch (type) {
+	case EVENT_TYPE_ILL_DEV:
+		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
+		       "address=0x%016llx flags=0x%04x]\n",
+		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+		       address, flags);
+		break;
+	case EVENT_TYPE_IO_FAULT:
+		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
+		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+		       domid, address, flags);
+		break;
+	case EVENT_TYPE_DEV_TAB_ERR:
+		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+		       "address=0x%016llx flags=0x%04x]\n",
+		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+		       address, flags);
+		break;
+	case EVENT_TYPE_PAGE_TAB_ERR:
+		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+		       domid, address, flags);
+		break;
+	case EVENT_TYPE_ILL_CMD:
+		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+		break;
+	case EVENT_TYPE_CMD_HARD_ERR:
+		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
+		       "flags=0x%04x]\n", address, flags);
+		break;
+	case EVENT_TYPE_IOTLB_INV_TO:
+		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
+		       "address=0x%016llx]\n",
+		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+		       address);
+		break;
+	case EVENT_TYPE_INV_DEV_REQ:
+		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
+		       "address=0x%016llx flags=0x%04x]\n",
+		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+		       address, flags);
+		break;
+	default:
+		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
+	}
+}
+
+static void iommu_poll_events(struct amd_iommu *iommu)
+{
+	u32 head, tail;
+	unsigned long flags;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
+	while (head != tail) {
+		iommu_print_event(iommu->evt_buf + head);
+		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
+	}
+
+	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+irqreturn_t amd_iommu_int_handler(int irq, void *data)
+{
+	struct amd_iommu *iommu;
+
+	list_for_each_entry(iommu, &amd_iommu_list, list)
+		iommu_poll_events(iommu);
+
+	return IRQ_HANDLED;
+}
+
+/****************************************************************************
+ *
  * IOMMU command queuing functions
  *
  ****************************************************************************/
@@ -195,7 +295,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
 		u64 address, size_t size)
 {
 	int s = 0;
-	unsigned pages = iommu_num_pages(address, size);
+	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
 
 	address &= PAGE_MASK;
 
@@ -213,6 +313,14 @@
 	return 0;
 }
 
+/* Flush the whole IO/TLB for a given protection domain */
+static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
+{
+	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+
+	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
+}
+
 /****************************************************************************
  *
  * The functions below are used the create the page table mappings for
@@ -372,11 +480,6 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  * efficient allocator.
  *
  ****************************************************************************/
-static unsigned long dma_mask_to_pages(unsigned long mask)
-{
-	return (mask >> PAGE_SHIFT) +
-		(PAGE_ALIGN(mask & ~PAGE_MASK) >> PAGE_SHIFT);
-}
 
 /*
  * The address allocator core function.
@@ -385,25 +488,31 @@ static unsigned long dma_mask_to_pages(unsigned long mask)
  */
 static unsigned long dma_ops_alloc_addresses(struct device *dev,
 					     struct dma_ops_domain *dom,
-					     unsigned int pages)
+					     unsigned int pages,
+					     unsigned long align_mask,
+					     u64 dma_mask)
 {
-	unsigned long limit = dma_mask_to_pages(*dev->dma_mask);
+	unsigned long limit;
 	unsigned long address;
-	unsigned long size = dom->aperture_size >> PAGE_SHIFT;
 	unsigned long boundary_size;
 
 	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 			PAGE_SIZE) >> PAGE_SHIFT;
-	limit = limit < size ? limit : size;
+	limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
+				       dma_mask >> PAGE_SHIFT);
 
-	if (dom->next_bit >= limit)
+	if (dom->next_bit >= limit) {
 		dom->next_bit = 0;
+		dom->need_flush = true;
+	}
 
 	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
-				   0 , boundary_size, 0);
-	if (address == -1)
+				   0 , boundary_size, align_mask);
+	if (address == -1) {
 		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
-				0, boundary_size, 0);
+				0, boundary_size, align_mask);
+		dom->need_flush = true;
+	}
 
 	if (likely(address != -1)) {
 		dom->next_bit = address + pages;
@@ -469,7 +578,7 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 	if (start_page + pages > last_page)
 		pages = last_page - start_page;
 
-	set_bit_string(dom->bitmap, start_page, pages);
+	iommu_area_reserve(dom->bitmap, start_page, pages);
 }
 
 static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
@@ -563,12 +672,16 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	dma_dom->bitmap[0] = 1;
 	dma_dom->next_bit = 0;
 
+	dma_dom->need_flush = false;
+	dma_dom->target_dev = 0xffff;
+
 	/* Intialize the exclusion range if necessary */
 	if (iommu->exclusion_start &&
 	    iommu->exclusion_start < dma_dom->aperture_size) {
 		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
 		int pages = iommu_num_pages(iommu->exclusion_start,
-					    iommu->exclusion_length);
+					    iommu->exclusion_length,
+					    PAGE_SIZE);
 		dma_ops_reserve_addresses(dma_dom, startpage, pages);
 	}
 
@@ -633,12 +746,13 @@ static void set_device_domain(struct amd_iommu *iommu,
 
 	u64 pte_root = virt_to_phys(domain->pt_root);
 
-	pte_root |= (domain->mode & 0x07) << 9;
-	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | 2;
+	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
+		    << DEV_ENTRY_MODE_SHIFT;
+	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
 
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	amd_iommu_dev_table[devid].data[0] = pte_root;
-	amd_iommu_dev_table[devid].data[1] = pte_root >> 32;
+	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
+	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
 	amd_iommu_dev_table[devid].data[2] = domain->id;
 
 	amd_iommu_pd_table[devid] = domain;
@@ -656,6 +770,45 @@ static void set_device_domain(struct amd_iommu *iommu,
 *****************************************************************************/
 
 /*
+ * This function checks if the driver got a valid device from the caller to
+ * avoid dereferencing invalid pointers.
+ */
+static bool check_device(struct device *dev)
+{
+	if (!dev || !dev->dma_mask)
+		return false;
+
+	return true;
+}
+
+/*
+ * In this function the list of preallocated protection domains is traversed to
+ * find the domain for a specific device
+ */
+static struct dma_ops_domain *find_protection_domain(u16 devid)
+{
+	struct dma_ops_domain *entry, *ret = NULL;
+	unsigned long flags;
+
+	if (list_empty(&iommu_pd_list))
+		return NULL;
+
+	spin_lock_irqsave(&iommu_pd_list_lock, flags);
+
+	list_for_each_entry(entry, &iommu_pd_list, list) {
+		if (entry->target_dev == devid) {
+			ret = entry;
+			list_del(&ret->list);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
+	return ret;
+}
+
+/*
  * In the dma_ops path we only have the struct device. This function
  * finds the corresponding IOMMU, the protection domain and the
  * requestor id for a given device.
@@ -671,27 +824,30 @@ static int get_device_resources(struct device *dev,
 	struct pci_dev *pcidev;
 	u16 _bdf;
 
-	BUG_ON(!dev || dev->bus != &pci_bus_type || !dev->dma_mask);
+	*iommu = NULL;
+	*domain = NULL;
+	*bdf = 0xffff;
+
+	if (dev->bus != &pci_bus_type)
+		return 0;
 
 	pcidev = to_pci_dev(dev);
 	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
 
 	/* device not translated by any IOMMU in the system? */
-	if (_bdf > amd_iommu_last_bdf) {
-		*iommu = NULL;
-		*domain = NULL;
-		*bdf = 0xffff;
+	if (_bdf > amd_iommu_last_bdf)
 		return 0;
-	}
 
 	*bdf = amd_iommu_alias_table[_bdf];
 
 	*iommu = amd_iommu_rlookup_table[*bdf];
 	if (*iommu == NULL)
 		return 0;
-	dma_dom = (*iommu)->default_dom;
 	*domain = domain_for_device(*bdf);
 	if (*domain == NULL) {
+		dma_dom = find_protection_domain(*bdf);
+		if (!dma_dom)
+			dma_dom = (*iommu)->default_dom;
 		*domain = &dma_dom->domain;
 		set_device_domain(*iommu, *domain, *bdf);
 		printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
@@ -770,17 +926,24 @@ static dma_addr_t __map_single(struct device *dev,
 			       struct dma_ops_domain *dma_dom,
 			       phys_addr_t paddr,
 			       size_t size,
-			       int dir)
+			       int dir,
+			       bool align,
+			       u64 dma_mask)
 {
 	dma_addr_t offset = paddr & ~PAGE_MASK;
 	dma_addr_t address, start;
 	unsigned int pages;
+	unsigned long align_mask = 0;
 	int i;
 
-	pages = iommu_num_pages(paddr, size);
+	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
 	paddr &= PAGE_MASK;
 
-	address = dma_ops_alloc_addresses(dev, dma_dom, pages);
+	if (align)
+		align_mask = (1UL << get_order(size)) - 1;
+
+	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
+					  dma_mask);
 	if (unlikely(address == bad_dma_address))
 		goto out;
 
@@ -792,6 +955,12 @@
 	}
 	address += offset;
 
+	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
+		iommu_flush_tlb(iommu, dma_dom->domain.id);
+		dma_dom->need_flush = false;
+	} else if (unlikely(iommu_has_npcache(iommu)))
+		iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
+
 out:
 	return address;
 }
@@ -812,7 +981,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
 		return;
 
-	pages = iommu_num_pages(dma_addr, size);
+	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr &= PAGE_MASK;
 	start = dma_addr;
 
@@ -822,6 +991,9 @@
 	}
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
+
+	if (amd_iommu_unmap_flush)
+		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
 }
 
 /*
@@ -835,6 +1007,12 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	struct protection_domain *domain;
 	u16 devid;
 	dma_addr_t addr;
+	u64 dma_mask;
+
+	if (!check_device(dev))
+		return bad_dma_address;
+
+	dma_mask = *dev->dma_mask;
 
 	get_device_resources(dev, &iommu, &domain, &devid);
 
@@ -843,14 +1021,12 @@
 		return (dma_addr_t)paddr;
 
 	spin_lock_irqsave(&domain->lock, flags);
-	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir);
+	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
+			    dma_mask);
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (iommu_has_npcache(iommu))
-		iommu_flush_pages(iommu, domain->id, addr, size);
-
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 out:
@@ -870,7 +1046,8 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 	struct protection_domain *domain;
 	u16 devid;
 
-	if (!get_device_resources(dev, &iommu, &domain, &devid))
+	if (!check_device(dev) ||
+	    !get_device_resources(dev, &iommu, &domain, &devid))
 		/* device not handled by any AMD IOMMU */
 		return;
 
@@ -878,9 +1055,7 @@
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	iommu_flush_pages(iommu, domain->id, dma_addr, size);
-
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -919,6 +1094,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *s;
 	phys_addr_t paddr;
 	int mapped_elems = 0;
+	u64 dma_mask;
+
+	if (!check_device(dev))
+		return 0;
+
+	dma_mask = *dev->dma_mask;
 
 	get_device_resources(dev, &iommu, &domain, &devid);
 
@@ -931,19 +1112,17 @@
 		paddr = sg_phys(s);
 
 		s->dma_address = __map_single(dev, iommu, domain->priv,
-					      paddr, s->length, dir);
+					      paddr, s->length, dir, false,
+					      dma_mask);
 
 		if (s->dma_address) {
 			s->dma_length = s->length;
 			mapped_elems++;
 		} else
 			goto unmap;
-		if (iommu_has_npcache(iommu))
-			iommu_flush_pages(iommu, domain->id, s->dma_address,
-					  s->dma_length);
 	}
 
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 out:
@@ -977,7 +1156,8 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 	u16 devid;
 	int i;
 
-	if (!get_device_resources(dev, &iommu, &domain, &devid))
+	if (!check_device(dev) ||
+	    !get_device_resources(dev, &iommu, &domain, &devid))
 		return;
 
 	spin_lock_irqsave(&domain->lock, flags);
@@ -985,12 +1165,10 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 	for_each_sg(sglist, s, nelems, i) {
 		__unmap_single(iommu, domain->priv, s->dma_address,
 			       s->dma_length, dir);
-		iommu_flush_pages(iommu, domain->id, s->dma_address,
-				  s->dma_length);
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1008,25 +1186,33 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	struct protection_domain *domain;
 	u16 devid;
 	phys_addr_t paddr;
+	u64 dma_mask = dev->coherent_dma_mask;
+
+	if (!check_device(dev))
+		return NULL;
+
+	if (!get_device_resources(dev, &iommu, &domain, &devid))
+		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
 
+	flag |= __GFP_ZERO;
 	virt_addr = (void *)__get_free_pages(flag, get_order(size));
 	if (!virt_addr)
 		return 0;
 
-	memset(virt_addr, 0, size);
 	paddr = virt_to_phys(virt_addr);
 
-	get_device_resources(dev, &iommu, &domain, &devid);
-
 	if (!iommu || !domain) {
 		*dma_addr = (dma_addr_t)paddr;
 		return virt_addr;
 	}
 
+	if (!dma_mask)
+		dma_mask = *dev->dma_mask;
+
 	spin_lock_irqsave(&domain->lock, flags);
 
 	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
-				 size, DMA_BIDIRECTIONAL);
+				 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
 	if (*dma_addr == bad_dma_address) {
 		free_pages((unsigned long)virt_addr, get_order(size));
@@ -1034,10 +1220,7 @@
 		goto out;
 	}
 
-	if (iommu_has_npcache(iommu))
-		iommu_flush_pages(iommu, domain->id, *dma_addr, size);
-
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 out:
@@ -1048,8 +1231,6 @@
 
 /*
  * The exported free_coherent function for dma_ops.
- * FIXME: fix the generic x86 DMA layer so that it actually calls that
- * function.
 */
 static void free_coherent(struct device *dev, size_t size,
 			  void *virt_addr, dma_addr_t dma_addr)
@@ -1059,6 +1240,9 @@ static void free_coherent(struct device *dev, size_t size,
 	struct protection_domain *domain;
 	u16 devid;
 
+	if (!check_device(dev))
+		return;
+
 	get_device_resources(dev, &iommu, &domain, &devid);
 
 	if (!iommu || !domain)
@@ -1067,9 +1251,8 @@ static void free_coherent(struct device *dev, size_t size,
 	spin_lock_irqsave(&domain->lock, flags);
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
-	iommu_flush_pages(iommu, domain->id, dma_addr, size);
 
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1079,6 +1262,30 @@ free_mem:
 }
 
 /*
+ * This function is called by the DMA layer to find out if we can handle a
+ * particular device. It is part of the dma_ops.
+ */
+static int amd_iommu_dma_supported(struct device *dev, u64 mask)
+{
+	u16 bdf;
+	struct pci_dev *pcidev;
+
+	/* No device or no PCI device */
+	if (!dev || dev->bus != &pci_bus_type)
+		return 0;
+
+	pcidev = to_pci_dev(dev);
+
+	bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
+
+	/* Out of our scope? */
+	if (bdf > amd_iommu_last_bdf)
+		return 0;
+
+	return 1;
+}
+
+/*
  * The function for pre-allocating protection domains.
  *
  * If the driver core informs the DMA layer if a driver grabs a device
@@ -1107,10 +1314,9 @@ void prealloc_protection_domains(void)
 		if (!dma_dom)
 			continue;
 		init_unity_mappings_for_device(dma_dom, devid);
-		set_device_domain(iommu, &dma_dom->domain, devid);
-		printk(KERN_INFO "AMD IOMMU: Allocated domain %d for device ",
-		       dma_dom->domain.id);
-		print_devid(devid, 1);
+		dma_dom->target_dev = devid;
+
+		list_add_tail(&dma_dom->list, &iommu_pd_list);
 	}
 }
 
@@ -1121,6 +1327,7 @@ static struct dma_mapping_ops amd_iommu_dma_ops = {
 	.unmap_single = unmap_single,
 	.map_sg = map_sg,
 	.unmap_sg = unmap_sg,
+	.dma_supported = amd_iommu_dma_supported,
 };
 
 /*
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index a69cc0f52042..4cd8083c58be 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -22,6 +22,8 @@
22#include <linux/gfp.h> 22#include <linux/gfp.h>
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/sysdev.h> 24#include <linux/sysdev.h>
25#include <linux/interrupt.h>
26#include <linux/msi.h>
25#include <asm/pci-direct.h> 27#include <asm/pci-direct.h>
26#include <asm/amd_iommu_types.h> 28#include <asm/amd_iommu_types.h>
27#include <asm/amd_iommu.h> 29#include <asm/amd_iommu.h>
@@ -30,7 +32,6 @@
30/* 32/*
31 * definitions for the ACPI scanning code 33 * definitions for the ACPI scanning code
32 */ 34 */
33#define PCI_BUS(x) (((x) >> 8) & 0xff)
34#define IVRS_HEADER_LENGTH 48 35#define IVRS_HEADER_LENGTH 48
35 36
36#define ACPI_IVHD_TYPE 0x10 37#define ACPI_IVHD_TYPE 0x10
@@ -121,6 +122,7 @@ LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
121 we find in ACPI */ 122 we find in ACPI */
122unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */ 123unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
123int amd_iommu_isolate; /* if 1, device isolation is enabled */ 124int amd_iommu_isolate; /* if 1, device isolation is enabled */
125bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
124 126
125LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 127LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
126 system */ 128 system */
@@ -234,7 +236,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
234{ 236{
235 u32 ctrl; 237 u32 ctrl;
236 238
237 ctrl = (u64)readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); 239 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
238 ctrl &= ~(1 << bit); 240 ctrl &= ~(1 << bit);
239 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 241 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
240} 242}
@@ -242,13 +244,23 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
242/* Function to enable the hardware */ 244/* Function to enable the hardware */
243void __init iommu_enable(struct amd_iommu *iommu) 245void __init iommu_enable(struct amd_iommu *iommu)
244{ 246{
245 printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at "); 247 printk(KERN_INFO "AMD IOMMU: Enabling IOMMU "
246 print_devid(iommu->devid, 0); 248 "at %02x:%02x.%x cap 0x%hx\n",
247 printk(" cap 0x%hx\n", iommu->cap_ptr); 249 iommu->dev->bus->number,
250 PCI_SLOT(iommu->dev->devfn),
251 PCI_FUNC(iommu->dev->devfn),
252 iommu->cap_ptr);
248 253
249 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); 254 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
250} 255}
251 256
257/* Function to enable IOMMU event logging and event interrupts */
258void __init iommu_enable_event_logging(struct amd_iommu *iommu)
259{
260 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
261 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
262}
263
252/* 264/*
253 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in 265 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
254 * the system has one. 266 * the system has one.
@@ -286,6 +298,14 @@ static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
286 ****************************************************************************/ 298 ****************************************************************************/
287 299
288/* 300/*
301 * This function calculates the length of a given IVHD entry
302 */
303static inline int ivhd_entry_length(u8 *ivhd)
304{
305 return 0x04 << (*ivhd >> 6);
306}
307
308/*
289 * This function reads the last device id the IOMMU has to handle from the PCI 309 * This function reads the last device id the IOMMU has to handle from the PCI
290 * capability header for this IOMMU 310 * capability header for this IOMMU
291 */ 311 */
@@ -329,7 +349,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
329 default: 349 default:
330 break; 350 break;
331 } 351 }
332 p += 0x04 << (*p >> 6); 352 p += ivhd_entry_length(p);
333 } 353 }
334 354
335 WARN_ON(p != end); 355 WARN_ON(p != end);
@@ -414,7 +434,32 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
414 434
415static void __init free_command_buffer(struct amd_iommu *iommu) 435static void __init free_command_buffer(struct amd_iommu *iommu)
416{ 436{
417 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); 437 free_pages((unsigned long)iommu->cmd_buf,
438 get_order(iommu->cmd_buf_size));
439}
440
441/* allocates the memory where the IOMMU will log its events to */
442static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
443{
444 u64 entry;
445 iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
446 get_order(EVT_BUFFER_SIZE));
447
448 if (iommu->evt_buf == NULL)
449 return NULL;
450
451 entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
452 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
453 &entry, sizeof(entry));
454
455 iommu->evt_buf_size = EVT_BUFFER_SIZE;
456
457 return iommu->evt_buf;
458}
459
460static void __init free_event_buffer(struct amd_iommu *iommu)
461{
462 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
418} 463}
419 464
420/* sets a specific bit in the device table entry. */ 465/* sets a specific bit in the device table entry. */
@@ -487,19 +532,21 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
487 */ 532 */
488static void __init init_iommu_from_pci(struct amd_iommu *iommu) 533static void __init init_iommu_from_pci(struct amd_iommu *iommu)
489{ 534{
490 int bus = PCI_BUS(iommu->devid);
491 int dev = PCI_SLOT(iommu->devid);
492 int fn = PCI_FUNC(iommu->devid);
493 int cap_ptr = iommu->cap_ptr; 535 int cap_ptr = iommu->cap_ptr;
494 u32 range; 536 u32 range, misc;
495 537
496 iommu->cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_CAP_HDR_OFFSET); 538 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
539 &iommu->cap);
540 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
541 &range);
542 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
543 &misc);
497 544
498 range = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
499 iommu->first_device = calc_devid(MMIO_GET_BUS(range), 545 iommu->first_device = calc_devid(MMIO_GET_BUS(range),
500 MMIO_GET_FD(range)); 546 MMIO_GET_FD(range));
501 iommu->last_device = calc_devid(MMIO_GET_BUS(range), 547 iommu->last_device = calc_devid(MMIO_GET_BUS(range),
502 MMIO_GET_LD(range)); 548 MMIO_GET_LD(range));
549 iommu->evt_msi_num = MMIO_MSI_NUM(misc);
503} 550}
504 551
505/* 552/*
@@ -604,7 +651,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
604 break; 651 break;
605 } 652 }
606 653
607 p += 0x04 << (e->type >> 6); 654 p += ivhd_entry_length(p);
608 } 655 }
609} 656}
610 657
@@ -622,6 +669,7 @@ static int __init init_iommu_devices(struct amd_iommu *iommu)
622static void __init free_iommu_one(struct amd_iommu *iommu) 669static void __init free_iommu_one(struct amd_iommu *iommu)
623{ 670{
624 free_command_buffer(iommu); 671 free_command_buffer(iommu);
672 free_event_buffer(iommu);
625 iommu_unmap_mmio_space(iommu); 673 iommu_unmap_mmio_space(iommu);
626} 674}
627 675
@@ -649,8 +697,12 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
649 /* 697 /*
650 * Copy data from ACPI table entry to the iommu struct 698 * Copy data from ACPI table entry to the iommu struct
651 */ 699 */
652 iommu->devid = h->devid; 700 iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
701 if (!iommu->dev)
702 return 1;
703
653 iommu->cap_ptr = h->cap_ptr; 704 iommu->cap_ptr = h->cap_ptr;
705 iommu->pci_seg = h->pci_seg;
654 iommu->mmio_phys = h->mmio_phys; 706 iommu->mmio_phys = h->mmio_phys;
655 iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys); 707 iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
656 if (!iommu->mmio_base) 708 if (!iommu->mmio_base)
@@ -661,11 +713,17 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
661 if (!iommu->cmd_buf) 713 if (!iommu->cmd_buf)
662 return -ENOMEM; 714 return -ENOMEM;
663 715
716 iommu->evt_buf = alloc_event_buffer(iommu);
717 if (!iommu->evt_buf)
718 return -ENOMEM;
719
720 iommu->int_enabled = false;
721
664 init_iommu_from_pci(iommu); 722 init_iommu_from_pci(iommu);
665 init_iommu_from_acpi(iommu, h); 723 init_iommu_from_acpi(iommu, h);
666 init_iommu_devices(iommu); 724 init_iommu_devices(iommu);
667 725
668 return 0; 726 return pci_enable_device(iommu->dev);
669} 727}
670 728
671/* 729/*
@@ -706,6 +764,95 @@ static int __init init_iommu_all(struct acpi_table_header *table)
706 764
707/**************************************************************************** 765/****************************************************************************
708 * 766 *
767 * The following functions initialize the MSI interrupts for all IOMMUs
768 * in the system. Its a bit challenging because there could be multiple
769 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
770 * pci_dev.
771 *
772 ****************************************************************************/
773
774static int __init iommu_setup_msix(struct amd_iommu *iommu)
775{
776 struct amd_iommu *curr;
777 struct msix_entry entries[32]; /* only 32 supported by AMD IOMMU */
778 int nvec = 0, i;
779
780 list_for_each_entry(curr, &amd_iommu_list, list) {
781 if (curr->dev == iommu->dev) {
782 entries[nvec].entry = curr->evt_msi_num;
783 entries[nvec].vector = 0;
784 curr->int_enabled = true;
785 nvec++;
786 }
787 }
788
789 if (pci_enable_msix(iommu->dev, entries, nvec)) {
790 pci_disable_msix(iommu->dev);
791 return 1;
792 }
793
794 for (i = 0; i < nvec; ++i) {
795 int r = request_irq(entries->vector, amd_iommu_int_handler,
796 IRQF_SAMPLE_RANDOM,
797 "AMD IOMMU",
798 NULL);
799 if (r)
800 goto out_free;
801 }
802
803 return 0;
804
805out_free:
806 for (i -= 1; i >= 0; --i)
807 free_irq(entries->vector, NULL);
808
809 pci_disable_msix(iommu->dev);
810
811 return 1;
812}
813
814static int __init iommu_setup_msi(struct amd_iommu *iommu)
815{
816 int r;
817 struct amd_iommu *curr;
818
819 list_for_each_entry(curr, &amd_iommu_list, list) {
820 if (curr->dev == iommu->dev)
821 curr->int_enabled = true;
822 }
823
824
825 if (pci_enable_msi(iommu->dev))
826 return 1;
827
828 r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
829 IRQF_SAMPLE_RANDOM,
830 "AMD IOMMU",
831 NULL);
832
833 if (r) {
834 pci_disable_msi(iommu->dev);
835 return 1;
836 }
837
838 return 0;
839}
840
841static int __init iommu_init_msi(struct amd_iommu *iommu)
842{
843 if (iommu->int_enabled)
844 return 0;
845
846 if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSIX))
847 return iommu_setup_msix(iommu);
848 else if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
849 return iommu_setup_msi(iommu);
850
851 return 1;
852}
853
854/****************************************************************************
855 *
709 * The next functions belong to the third pass of parsing the ACPI 856 * The next functions belong to the third pass of parsing the ACPI
710 * table. In this last pass the memory mapping requirements are 857 * table. In this last pass the memory mapping requirements are
711 * gathered (like exclusion and unity mapping reanges). 858 * gathered (like exclusion and unity mapping reanges).
@@ -811,7 +958,6 @@ static void init_device_table(void)
811 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 958 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
812 set_dev_entry_bit(devid, DEV_ENTRY_VALID); 959 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
813 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); 960 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
814 set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);
815 } 961 }
816} 962}
817 963
@@ -825,6 +971,8 @@ static void __init enable_iommus(void)
825 971
826 list_for_each_entry(iommu, &amd_iommu_list, list) { 972 list_for_each_entry(iommu, &amd_iommu_list, list) {
827 iommu_set_exclusion_range(iommu); 973 iommu_set_exclusion_range(iommu);
974 iommu_init_msi(iommu);
975 iommu_enable_event_logging(iommu);
828 iommu_enable(iommu); 976 iommu_enable(iommu);
829 } 977 }
830} 978}
@@ -995,11 +1143,17 @@ int __init amd_iommu_init(void)
995 else 1143 else
996 printk("disabled\n"); 1144 printk("disabled\n");
997 1145
1146 if (amd_iommu_unmap_flush)
1147 printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n");
1148 else
1149 printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n");
1150
998out: 1151out:
999 return ret; 1152 return ret;
1000 1153
1001free: 1154free:
1002 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 1); 1155 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
1156 get_order(MAX_DOMAIN_ID/8));
1003 1157
1004 free_pages((unsigned long)amd_iommu_pd_table, 1158 free_pages((unsigned long)amd_iommu_pd_table,
1005 get_order(rlookup_table_size)); 1159 get_order(rlookup_table_size));
@@ -1057,8 +1211,10 @@ void __init amd_iommu_detect(void)
1057static int __init parse_amd_iommu_options(char *str) 1211static int __init parse_amd_iommu_options(char *str)
1058{ 1212{
1059 for (; *str; ++str) { 1213 for (; *str; ++str) {
1060 if (strcmp(str, "isolate") == 0) 1214 if (strncmp(str, "isolate", 7) == 0)
1061 amd_iommu_isolate = 1; 1215 amd_iommu_isolate = 1;
1216 if (strncmp(str, "fullflush", 11) == 0)
1217 amd_iommu_unmap_flush = true;
1062 } 1218 }
1063 1219
1064 return 1; 1220 return 1;
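Boot-option parsers like the one above are wired up through the kernel's
__setup() machinery, which matches the "name=" prefix on the command line
and hands the remainder of the string to the handler. A hedged sketch of
the idiom (the actual registration for parse_amd_iommu_options is outside
this hunk; the demo names are hypothetical):

	#include <linux/init.h>
	#include <linux/string.h>

	static bool demo_fullflush;

	static int __init parse_demo_options(char *str)
	{
		for (; *str; ++str) {
			if (strncmp(str, "fullflush", 9) == 0)
				demo_fullflush = true;
		}
		return 1;	/* non-zero: the option was consumed */
	}
	__setup("demo_iommu=", parse_demo_options);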
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 44e21826db11..9a32b37ee2ee 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -455,11 +455,11 @@ out:
455 force_iommu || 455 force_iommu ||
456 valid_agp || 456 valid_agp ||
457 fallback_aper_force) { 457 fallback_aper_force) {
458 printk(KERN_ERR 458 printk(KERN_INFO
459 "Your BIOS doesn't leave a aperture memory hole\n"); 459 "Your BIOS doesn't leave a aperture memory hole\n");
460 printk(KERN_ERR 460 printk(KERN_INFO
461 "Please enable the IOMMU option in the BIOS setup\n"); 461 "Please enable the IOMMU option in the BIOS setup\n");
462 printk(KERN_ERR 462 printk(KERN_INFO
463 "This costs you %d MB of RAM\n", 463 "This costs you %d MB of RAM\n",
464 32 << fallback_aper_order); 464 32 << fallback_aper_order);
465 465
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic.c
index f88bd0d982b0..04a7f960bbc0 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic.c
@@ -23,11 +23,13 @@
23#include <linux/mc146818rtc.h> 23#include <linux/mc146818rtc.h>
24#include <linux/kernel_stat.h> 24#include <linux/kernel_stat.h>
25#include <linux/sysdev.h> 25#include <linux/sysdev.h>
26#include <linux/ioport.h>
26#include <linux/cpu.h> 27#include <linux/cpu.h>
27#include <linux/clockchips.h> 28#include <linux/clockchips.h>
28#include <linux/acpi_pmtmr.h> 29#include <linux/acpi_pmtmr.h>
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/dmi.h> 31#include <linux/dmi.h>
32#include <linux/dmar.h>
31 33
32#include <asm/atomic.h> 34#include <asm/atomic.h>
33#include <asm/smp.h> 35#include <asm/smp.h>
@@ -36,8 +38,14 @@
36#include <asm/desc.h> 38#include <asm/desc.h>
37#include <asm/arch_hooks.h> 39#include <asm/arch_hooks.h>
38#include <asm/hpet.h> 40#include <asm/hpet.h>
41#include <asm/pgalloc.h>
39#include <asm/i8253.h> 42#include <asm/i8253.h>
40#include <asm/nmi.h> 43#include <asm/nmi.h>
44#include <asm/idle.h>
45#include <asm/proto.h>
46#include <asm/timex.h>
47#include <asm/apic.h>
48#include <asm/i8259.h>
41 49
42#include <mach_apic.h> 50#include <mach_apic.h>
43#include <mach_apicdef.h> 51#include <mach_apicdef.h>
@@ -50,20 +58,60 @@
50# error SPURIOUS_APIC_VECTOR definition error 58# error SPURIOUS_APIC_VECTOR definition error
51#endif 59#endif
52 60
53unsigned long mp_lapic_addr; 61#ifdef CONFIG_X86_32
54
55/* 62/*
56 * Knob to control our willingness to enable the local APIC. 63 * Knob to control our willingness to enable the local APIC.
57 * 64 *
58 * +1=force-enable 65 * +1=force-enable
59 */ 66 */
60static int force_enable_local_apic; 67static int force_enable_local_apic;
61int disable_apic; 68/*
69 * APIC command line parameters
70 */
71static int __init parse_lapic(char *arg)
72{
73 force_enable_local_apic = 1;
74 return 0;
75}
76early_param("lapic", parse_lapic);
77/* Local APIC was disabled by the BIOS and enabled by the kernel */
78static int enabled_via_apicbase;
79
80#endif
81
82#ifdef CONFIG_X86_64
83static int apic_calibrate_pmtmr __initdata;
84static __init int setup_apicpmtimer(char *s)
85{
86 apic_calibrate_pmtmr = 1;
87 notsc_setup(NULL);
88 return 0;
89}
90__setup("apicpmtimer", setup_apicpmtimer);
91#endif
92
93#ifdef CONFIG_X86_64
94#define HAVE_X2APIC
95#endif
96
97#ifdef HAVE_X2APIC
98int x2apic;
99/* x2apic enabled before OS handover */
100int x2apic_preenabled;
101int disable_x2apic;
102static __init int setup_nox2apic(char *str)
103{
104 disable_x2apic = 1;
105 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
106 return 0;
107}
108early_param("nox2apic", setup_nox2apic);
109#endif
62 110
63/* Local APIC timer verification ok */ 111unsigned long mp_lapic_addr;
64static int local_apic_timer_verify_ok; 112int disable_apic;
65/* Disable local APIC timer from the kernel commandline or via dmi quirk */ 113/* Disable local APIC timer from the kernel commandline or via dmi quirk */
66static int local_apic_timer_disabled; 114static int disable_apic_timer __cpuinitdata;
67/* Local APIC timer works in C2 */ 115/* Local APIC timer works in C2 */
68int local_apic_timer_c2_ok; 116int local_apic_timer_c2_ok;
69EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); 117EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
@@ -112,9 +160,6 @@ static struct clock_event_device lapic_clockevent = {
112}; 160};
113static DEFINE_PER_CPU(struct clock_event_device, lapic_events); 161static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
114 162
115/* Local APIC was disabled by the BIOS and enabled by the kernel */
116static int enabled_via_apicbase;
117
118static unsigned long apic_phys; 163static unsigned long apic_phys;
119 164
120/* 165/*
@@ -130,7 +175,11 @@ static inline int lapic_get_version(void)
130 */ 175 */
131static inline int lapic_is_integrated(void) 176static inline int lapic_is_integrated(void)
132{ 177{
178#ifdef CONFIG_X86_64
179 return 1;
180#else
133 return APIC_INTEGRATED(lapic_get_version()); 181 return APIC_INTEGRATED(lapic_get_version());
182#endif
134} 183}
135 184
136/* 185/*
@@ -145,13 +194,18 @@ static int modern_apic(void)
145 return lapic_get_version() >= 0x14; 194 return lapic_get_version() >= 0x14;
146} 195}
147 196
148void apic_wait_icr_idle(void) 197/*
 198 * Paravirt kernels may also use the ops below, so we keep using the
 199 * generic apic_read()/apic_write(), which may point to different ops
 200 * in the PARAVIRT case.
201 */
202void xapic_wait_icr_idle(void)
149{ 203{
150 while (apic_read(APIC_ICR) & APIC_ICR_BUSY) 204 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
151 cpu_relax(); 205 cpu_relax();
152} 206}
153 207
154u32 safe_apic_wait_icr_idle(void) 208u32 safe_xapic_wait_icr_idle(void)
155{ 209{
156 u32 send_status; 210 u32 send_status;
157 int timeout; 211 int timeout;
@@ -167,19 +221,88 @@ u32 safe_apic_wait_icr_idle(void)
167 return send_status; 221 return send_status;
168} 222}
169 223
224void xapic_icr_write(u32 low, u32 id)
225{
226 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
227 apic_write(APIC_ICR, low);
228}
229
230u64 xapic_icr_read(void)
231{
232 u32 icr1, icr2;
233
234 icr2 = apic_read(APIC_ICR2);
235 icr1 = apic_read(APIC_ICR);
236
237 return icr1 | ((u64)icr2 << 32);
238}
239
240static struct apic_ops xapic_ops = {
241 .read = native_apic_mem_read,
242 .write = native_apic_mem_write,
243 .icr_read = xapic_icr_read,
244 .icr_write = xapic_icr_write,
245 .wait_icr_idle = xapic_wait_icr_idle,
246 .safe_wait_icr_idle = safe_xapic_wait_icr_idle,
247};
248
249struct apic_ops __read_mostly *apic_ops = &xapic_ops;
250EXPORT_SYMBOL_GPL(apic_ops);
251
252#ifdef HAVE_X2APIC
253static void x2apic_wait_icr_idle(void)
254{
255 /* no need to wait for icr idle in x2apic */
256 return;
257}
258
259static u32 safe_x2apic_wait_icr_idle(void)
260{
261 /* no need to wait for icr idle in x2apic */
262 return 0;
263}
264
265void x2apic_icr_write(u32 low, u32 id)
266{
267 wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
268}
269
270u64 x2apic_icr_read(void)
271{
272 unsigned long val;
273
274 rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
275 return val;
276}
277
278static struct apic_ops x2apic_ops = {
279 .read = native_apic_msr_read,
280 .write = native_apic_msr_write,
281 .icr_read = x2apic_icr_read,
282 .icr_write = x2apic_icr_write,
283 .wait_icr_idle = x2apic_wait_icr_idle,
284 .safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
285};
286#endif
287
170/** 288/**
171 * enable_NMI_through_LVT0 - enable NMI through local vector table 0 289 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
172 */ 290 */
173void __cpuinit enable_NMI_through_LVT0(void) 291void __cpuinit enable_NMI_through_LVT0(void)
174{ 292{
175 unsigned int v = APIC_DM_NMI; 293 unsigned int v;
294
295 /* unmask and set to NMI */
296 v = APIC_DM_NMI;
176 297
177 /* Level triggered for 82489DX */ 298 /* Level triggered for 82489DX (32bit mode) */
178 if (!lapic_is_integrated()) 299 if (!lapic_is_integrated())
179 v |= APIC_LVT_LEVEL_TRIGGER; 300 v |= APIC_LVT_LEVEL_TRIGGER;
301
180 apic_write(APIC_LVT0, v); 302 apic_write(APIC_LVT0, v);
181} 303}
182 304
305#ifdef CONFIG_X86_32
183/** 306/**
184 * get_physical_broadcast - Get number of physical broadcast IDs 307 * get_physical_broadcast - Get number of physical broadcast IDs
185 */ 308 */
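The apic_ops table introduced in the hunk above is consumed through thin
inline wrappers, so a single pointer swap redirects every APIC register
access: MMIO for xapic, MSRs for x2apic, or paravirt replacements. A
sketch of the dispatch side, assuming the struct layout shown in the
hunk:

	static inline u32 apic_read(u32 reg)
	{
		return apic_ops->read(reg);
	}

	static inline void apic_write(u32 reg, u32 val)
	{
		apic_ops->write(reg, val);
	}

	static inline void apic_wait_icr_idle(void)
	{
		apic_ops->wait_icr_idle();
	}

	static inline u32 safe_apic_wait_icr_idle(void)
	{
		return apic_ops->safe_wait_icr_idle();
	}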
@@ -187,15 +310,20 @@ int get_physical_broadcast(void)
187{ 310{
188 return modern_apic() ? 0xff : 0xf; 311 return modern_apic() ? 0xff : 0xf;
189} 312}
313#endif
190 314
191/** 315/**
192 * lapic_get_maxlvt - get the maximum number of local vector table entries 316 * lapic_get_maxlvt - get the maximum number of local vector table entries
193 */ 317 */
194int lapic_get_maxlvt(void) 318int lapic_get_maxlvt(void)
195{ 319{
196 unsigned int v = apic_read(APIC_LVR); 320 unsigned int v;
197 321
198 /* 82489DXs do not report # of LVT entries. */ 322 v = apic_read(APIC_LVR);
323 /*
324 * - we always have APIC integrated on 64bit mode
325 * - 82489DXs do not report # of LVT entries
326 */
199 return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2; 327 return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
200} 328}
201 329
@@ -203,7 +331,7 @@ int lapic_get_maxlvt(void)
203 * Local APIC timer 331 * Local APIC timer
204 */ 332 */
205 333
206/* Clock divisor is set to 16 */ 334/* Clock divisor */
207#define APIC_DIVISOR 16 335#define APIC_DIVISOR 16
208 336
209/* 337/*
@@ -212,6 +340,9 @@ int lapic_get_maxlvt(void)
212 * this function twice on the boot CPU, once with a bogus timeout 340 * this function twice on the boot CPU, once with a bogus timeout
213 * value, second time for real. The other (noncalibrating) CPUs 341 * value, second time for real. The other (noncalibrating) CPUs
214 * call this function only once, with the real, calibrated value. 342 * call this function only once, with the real, calibrated value.
343 *
344 * We do reads before writes even if unnecessary, to get around the
345 * P5 APIC double write bug.
215 */ 346 */
216static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) 347static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
217{ 348{
@@ -233,14 +364,48 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
233 */ 364 */
234 tmp_value = apic_read(APIC_TDCR); 365 tmp_value = apic_read(APIC_TDCR);
235 apic_write(APIC_TDCR, 366 apic_write(APIC_TDCR,
236 (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | 367 (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
237 APIC_TDR_DIV_16); 368 APIC_TDR_DIV_16);
238 369
239 if (!oneshot) 370 if (!oneshot)
240 apic_write(APIC_TMICT, clocks / APIC_DIVISOR); 371 apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
241} 372}
242 373
243/* 374/*
375 * Setup extended LVT, AMD specific (K8, family 10h)
376 *
377 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
378 * MCE interrupts are supported. Thus MCE offset must be set to 0.
379 *
380 * If mask=1, the LVT entry does not generate interrupts while mask=0
381 * enables the vector. See also the BKDGs.
382 */
383
384#define APIC_EILVT_LVTOFF_MCE 0
385#define APIC_EILVT_LVTOFF_IBS 1
386
387static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
388{
389 unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
390 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
391
392 apic_write(reg, v);
393}
394
395u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
396{
397 setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
398 return APIC_EILVT_LVTOFF_MCE;
399}
400
401u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
402{
403 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
404 return APIC_EILVT_LVTOFF_IBS;
405}
406EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
407
408/*
244 * Program the next event, relative to now 409 * Program the next event, relative to now
245 */ 410 */
246static int lapic_next_event(unsigned long delta, 411static int lapic_next_event(unsigned long delta,
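setup_APIC_eilvt() above packs an extended-LVT entry as mask in bit 16,
message type in bits 10:8 and vector in bits 7:0, at register offset
APIC_EILVT0 + 0x10 * lvt_off. A standalone check of that encoding (the
0x4 message type is NMI delivery per the AMD BKDG; the values here are
illustrative):

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t eilvt_value(uint8_t vector, uint8_t msg_type,
				    uint8_t mask)
	{
		return ((uint32_t)mask << 16) |
		       ((uint32_t)msg_type << 8) | vector;
	}

	int main(void)
	{
		/* NMI delivery: vector field ignored, entry unmasked */
		printf("0x%05x\n", eilvt_value(0x00, 0x4, 0));
		return 0;	/* prints 0x00400 */
	}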
@@ -259,8 +424,8 @@ static void lapic_timer_setup(enum clock_event_mode mode,
259 unsigned long flags; 424 unsigned long flags;
260 unsigned int v; 425 unsigned int v;
261 426
262 /* Lapic used for broadcast ? */ 427 /* Lapic used as dummy for broadcast ? */
263 if (!local_apic_timer_verify_ok) 428 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
264 return; 429 return;
265 430
266 local_irq_save(flags); 431 local_irq_save(flags);
@@ -299,7 +464,7 @@ static void lapic_timer_broadcast(cpumask_t mask)
 299 * Setup the local APIC timer for this CPU. Copy the initialized values 464 * Setup the local APIC timer for this CPU. Copy the initialized values
300 * of the boot CPU and register the clock event in the framework. 465 * of the boot CPU and register the clock event in the framework.
301 */ 466 */
302static void __devinit setup_APIC_timer(void) 467static void __cpuinit setup_APIC_timer(void)
303{ 468{
304 struct clock_event_device *levt = &__get_cpu_var(lapic_events); 469 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
305 470
@@ -369,14 +534,51 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
369 } 534 }
370} 535}
371 536
537static int __init calibrate_by_pmtimer(long deltapm, long *delta)
538{
539 const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
540 const long pm_thresh = pm_100ms / 100;
541 unsigned long mult;
542 u64 res;
543
544#ifndef CONFIG_X86_PM_TIMER
545 return -1;
546#endif
547
548 apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
549
550 /* Check, if the PM timer is available */
551 if (!deltapm)
552 return -1;
553
554 mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
555
556 if (deltapm > (pm_100ms - pm_thresh) &&
557 deltapm < (pm_100ms + pm_thresh)) {
558 apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
559 } else {
560 res = (((u64)deltapm) * mult) >> 22;
561 do_div(res, 1000000);
562 printk(KERN_WARNING "APIC calibration not consistent "
563 "with PM Timer: %ldms instead of 100ms\n",
564 (long)res);
565 /* Correct the lapic counter value */
566 res = (((u64)(*delta)) * pm_100ms);
567 do_div(res, deltapm);
568 printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
569 "%lu (%ld)\n", (unsigned long)res, *delta);
570 *delta = (long)res;
571 }
572
573 return 0;
574}
575
372static int __init calibrate_APIC_clock(void) 576static int __init calibrate_APIC_clock(void)
373{ 577{
374 struct clock_event_device *levt = &__get_cpu_var(lapic_events); 578 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
375 const long pm_100ms = PMTMR_TICKS_PER_SEC/10;
376 const long pm_thresh = pm_100ms/100;
377 void (*real_handler)(struct clock_event_device *dev); 579 void (*real_handler)(struct clock_event_device *dev);
378 unsigned long deltaj; 580 unsigned long deltaj;
379 long delta, deltapm; 581 long delta;
380 int pm_referenced = 0; 582 int pm_referenced = 0;
381 583
382 local_irq_disable(); 584 local_irq_disable();
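calibrate_by_pmtimer() above leans on clocksource_hz2mult(): with a shift
of 22 the multiplier is roughly (10^9 << 22) / PMTMR_TICKS_PER_SEC, so
(ticks * mult) >> 22 converts ACPI PM timer ticks to nanoseconds. A
standalone sanity check of that arithmetic (3579545 is the 3.58 MHz ACPI
PM timer rate):

	#include <stdio.h>
	#include <stdint.h>

	#define PMTMR_TICKS_PER_SEC 3579545

	int main(void)
	{
		uint64_t mult  = (1000000000ULL << 22) / PMTMR_TICKS_PER_SEC;
		uint64_t ticks = PMTMR_TICKS_PER_SEC / 10; /* ideal 100ms */
		uint64_t ns    = (ticks * mult) >> 22;

		/* prints a value just under 100000000 ns, i.e. ~100 ms */
		printf("%llu ticks -> %llu ns\n",
		       (unsigned long long)ticks, (unsigned long long)ns);
		return 0;
	}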
@@ -386,10 +588,10 @@ static int __init calibrate_APIC_clock(void)
386 global_clock_event->event_handler = lapic_cal_handler; 588 global_clock_event->event_handler = lapic_cal_handler;
387 589
388 /* 590 /*
389 * Setup the APIC counter to 1e9. There is no way the lapic 591 * Setup the APIC counter to maximum. There is no way the lapic
390 * can underflow in the 100ms detection time frame 592 * can underflow in the 100ms detection time frame
391 */ 593 */
392 __setup_APIC_LVTT(1000000000, 0, 0); 594 __setup_APIC_LVTT(0xffffffff, 0, 0);
393 595
394 /* Let the interrupts run */ 596 /* Let the interrupts run */
395 local_irq_enable(); 597 local_irq_enable();
@@ -406,34 +608,9 @@ static int __init calibrate_APIC_clock(void)
406 delta = lapic_cal_t1 - lapic_cal_t2; 608 delta = lapic_cal_t1 - lapic_cal_t2;
407 apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta); 609 apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
408 610
409 /* Check, if the PM timer is available */ 611 /* we trust the PM based calibration if possible */
410 deltapm = lapic_cal_pm2 - lapic_cal_pm1; 612 pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
411 apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm); 613 &delta);
412
413 if (deltapm) {
414 unsigned long mult;
415 u64 res;
416
417 mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
418
419 if (deltapm > (pm_100ms - pm_thresh) &&
420 deltapm < (pm_100ms + pm_thresh)) {
421 apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
422 } else {
423 res = (((u64) deltapm) * mult) >> 22;
424 do_div(res, 1000000);
425 printk(KERN_WARNING "APIC calibration not consistent "
426 "with PM Timer: %ldms instead of 100ms\n",
427 (long)res);
428 /* Correct the lapic counter value */
429 res = (((u64) delta) * pm_100ms);
430 do_div(res, deltapm);
431 printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
432 "%lu (%ld)\n", (unsigned long) res, delta);
433 delta = (long) res;
434 }
435 pm_referenced = 1;
436 }
437 614
438 /* Calculate the scaled math multiplication factor */ 615 /* Calculate the scaled math multiplication factor */
439 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, 616 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
@@ -473,9 +650,12 @@ static int __init calibrate_APIC_clock(void)
473 return -1; 650 return -1;
474 } 651 }
475 652
476 local_apic_timer_verify_ok = 1; 653 levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
477 654
478 /* We trust the pm timer based calibration */ 655 /*
 656 * PM timer calibration failed or was not turned on,
 657 * so let's try APIC timer based calibration
658 */
479 if (!pm_referenced) { 659 if (!pm_referenced) {
480 apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); 660 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
481 661
@@ -507,11 +687,11 @@ static int __init calibrate_APIC_clock(void)
507 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2) 687 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
508 apic_printk(APIC_VERBOSE, "... jiffies result ok\n"); 688 apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
509 else 689 else
510 local_apic_timer_verify_ok = 0; 690 levt->features |= CLOCK_EVT_FEAT_DUMMY;
511 } else 691 } else
512 local_irq_enable(); 692 local_irq_enable();
513 693
514 if (!local_apic_timer_verify_ok) { 694 if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
515 printk(KERN_WARNING 695 printk(KERN_WARNING
516 "APIC timer disabled due to verification failure.\n"); 696 "APIC timer disabled due to verification failure.\n");
517 return -1; 697 return -1;
@@ -533,7 +713,8 @@ void __init setup_boot_APIC_clock(void)
533 * timer as a dummy clock event source on SMP systems, so the 713 * timer as a dummy clock event source on SMP systems, so the
534 * broadcast mechanism is used. On UP systems simply ignore it. 714 * broadcast mechanism is used. On UP systems simply ignore it.
535 */ 715 */
536 if (local_apic_timer_disabled) { 716 if (disable_apic_timer) {
717 printk(KERN_INFO "Disabling APIC timer\n");
537 /* No broadcast on UP ! */ 718 /* No broadcast on UP ! */
538 if (num_possible_cpus() > 1) { 719 if (num_possible_cpus() > 1) {
539 lapic_clockevent.mult = 1; 720 lapic_clockevent.mult = 1;
@@ -567,7 +748,7 @@ void __init setup_boot_APIC_clock(void)
567 setup_APIC_timer(); 748 setup_APIC_timer();
568} 749}
569 750
570void __devinit setup_secondary_APIC_clock(void) 751void __cpuinit setup_secondary_APIC_clock(void)
571{ 752{
572 setup_APIC_timer(); 753 setup_APIC_timer();
573} 754}
@@ -602,7 +783,11 @@ static void local_apic_timer_interrupt(void)
602 /* 783 /*
603 * the NMI deadlock-detector uses this. 784 * the NMI deadlock-detector uses this.
604 */ 785 */
786#ifdef CONFIG_X86_64
787 add_pda(apic_timer_irqs, 1);
788#else
605 per_cpu(irq_stat, cpu).apic_timer_irqs++; 789 per_cpu(irq_stat, cpu).apic_timer_irqs++;
790#endif
606 791
607 evt->event_handler(evt); 792 evt->event_handler(evt);
608} 793}
@@ -629,6 +814,9 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
 629 * Besides, if we don't, timer interrupts ignore the global 814 * Besides, if we don't, timer interrupts ignore the global
630 * interrupt lock, which is the WrongThing (tm) to do. 815 * interrupt lock, which is the WrongThing (tm) to do.
631 */ 816 */
817#ifdef CONFIG_X86_64
818 exit_idle();
819#endif
632 irq_enter(); 820 irq_enter();
633 local_apic_timer_interrupt(); 821 local_apic_timer_interrupt();
634 irq_exit(); 822 irq_exit();
@@ -642,35 +830,6 @@ int setup_profiling_timer(unsigned int multiplier)
642} 830}
643 831
644/* 832/*
645 * Setup extended LVT, AMD specific (K8, family 10h)
646 *
647 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
648 * MCE interrupts are supported. Thus MCE offset must be set to 0.
649 */
650
651#define APIC_EILVT_LVTOFF_MCE 0
652#define APIC_EILVT_LVTOFF_IBS 1
653
654static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
655{
656 unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
657 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
658 apic_write(reg, v);
659}
660
661u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
662{
663 setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
664 return APIC_EILVT_LVTOFF_MCE;
665}
666
667u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
668{
669 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
670 return APIC_EILVT_LVTOFF_IBS;
671}
672
673/*
674 * Local APIC start and shutdown 833 * Local APIC start and shutdown
675 */ 834 */
676 835
@@ -715,7 +874,7 @@ void clear_local_APIC(void)
715 } 874 }
716 875
 717 /* let's not touch this if we didn't frob it */ 876 /* let's not touch this if we didn't frob it */
 718#ifdef CONFIG_X86_MCE_P4THERMAL 877#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
719 if (maxlvt >= 5) { 878 if (maxlvt >= 5) {
720 v = apic_read(APIC_LVTTHMR); 879 v = apic_read(APIC_LVTTHMR);
721 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); 880 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
@@ -732,10 +891,6 @@ void clear_local_APIC(void)
732 if (maxlvt >= 4) 891 if (maxlvt >= 4)
733 apic_write(APIC_LVTPC, APIC_LVT_MASKED); 892 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
734 893
735#ifdef CONFIG_X86_MCE_P4THERMAL
736 if (maxlvt >= 5)
737 apic_write(APIC_LVTTHMR, APIC_LVT_MASKED);
738#endif
739 /* Integrated APIC (!82489DX) ? */ 894 /* Integrated APIC (!82489DX) ? */
740 if (lapic_is_integrated()) { 895 if (lapic_is_integrated()) {
741 if (maxlvt > 3) 896 if (maxlvt > 3)
@@ -750,7 +905,7 @@ void clear_local_APIC(void)
750 */ 905 */
751void disable_local_APIC(void) 906void disable_local_APIC(void)
752{ 907{
753 unsigned long value; 908 unsigned int value;
754 909
755 clear_local_APIC(); 910 clear_local_APIC();
756 911
@@ -762,6 +917,7 @@ void disable_local_APIC(void)
762 value &= ~APIC_SPIV_APIC_ENABLED; 917 value &= ~APIC_SPIV_APIC_ENABLED;
763 apic_write(APIC_SPIV, value); 918 apic_write(APIC_SPIV, value);
764 919
920#ifdef CONFIG_X86_32
765 /* 921 /*
766 * When LAPIC was disabled by the BIOS and enabled by the kernel, 922 * When LAPIC was disabled by the BIOS and enabled by the kernel,
767 * restore the disabled state. 923 * restore the disabled state.
@@ -773,6 +929,7 @@ void disable_local_APIC(void)
773 l &= ~MSR_IA32_APICBASE_ENABLE; 929 l &= ~MSR_IA32_APICBASE_ENABLE;
774 wrmsr(MSR_IA32_APICBASE, l, h); 930 wrmsr(MSR_IA32_APICBASE, l, h);
775 } 931 }
932#endif
776} 933}
777 934
778/* 935/*
@@ -789,11 +946,15 @@ void lapic_shutdown(void)
789 return; 946 return;
790 947
791 local_irq_save(flags); 948 local_irq_save(flags);
792 clear_local_APIC();
793 949
794 if (enabled_via_apicbase) 950#ifdef CONFIG_X86_32
951 if (!enabled_via_apicbase)
952 clear_local_APIC();
953 else
954#endif
795 disable_local_APIC(); 955 disable_local_APIC();
796 956
957
797 local_irq_restore(flags); 958 local_irq_restore(flags);
798} 959}
799 960
@@ -838,6 +999,12 @@ int __init verify_local_APIC(void)
838 */ 999 */
839 reg0 = apic_read(APIC_ID); 1000 reg0 = apic_read(APIC_ID);
840 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); 1001 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
1002 apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
1003 reg1 = apic_read(APIC_ID);
1004 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
1005 apic_write(APIC_ID, reg0);
1006 if (reg1 != (reg0 ^ APIC_ID_MASK))
1007 return 0;
841 1008
842 /* 1009 /*
843 * The next two are just to see if we have sane values. 1010 * The next two are just to see if we have sane values.
@@ -863,14 +1030,15 @@ void __init sync_Arb_IDs(void)
863 */ 1030 */
864 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD) 1031 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
865 return; 1032 return;
1033
866 /* 1034 /*
867 * Wait for idle. 1035 * Wait for idle.
868 */ 1036 */
869 apic_wait_icr_idle(); 1037 apic_wait_icr_idle();
870 1038
871 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n"); 1039 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
872 apic_write(APIC_ICR, 1040 apic_write(APIC_ICR, APIC_DEST_ALLINC |
873 APIC_DEST_ALLINC | APIC_INT_LEVELTRIG | APIC_DM_INIT); 1041 APIC_INT_LEVELTRIG | APIC_DM_INIT);
874} 1042}
875 1043
876/* 1044/*
@@ -878,7 +1046,7 @@ void __init sync_Arb_IDs(void)
878 */ 1046 */
879void __init init_bsp_APIC(void) 1047void __init init_bsp_APIC(void)
880{ 1048{
881 unsigned long value; 1049 unsigned int value;
882 1050
883 /* 1051 /*
 884 * Don't do the setup now if we have an SMP BIOS as the 1052 * Don't do the setup now if we have an SMP BIOS as the
@@ -899,11 +1067,13 @@ void __init init_bsp_APIC(void)
899 value &= ~APIC_VECTOR_MASK; 1067 value &= ~APIC_VECTOR_MASK;
900 value |= APIC_SPIV_APIC_ENABLED; 1068 value |= APIC_SPIV_APIC_ENABLED;
901 1069
1070#ifdef CONFIG_X86_32
902 /* This bit is reserved on P4/Xeon and should be cleared */ 1071 /* This bit is reserved on P4/Xeon and should be cleared */
903 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 1072 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
904 (boot_cpu_data.x86 == 15)) 1073 (boot_cpu_data.x86 == 15))
905 value &= ~APIC_SPIV_FOCUS_DISABLED; 1074 value &= ~APIC_SPIV_FOCUS_DISABLED;
906 else 1075 else
1076#endif
907 value |= APIC_SPIV_FOCUS_DISABLED; 1077 value |= APIC_SPIV_FOCUS_DISABLED;
908 value |= SPURIOUS_APIC_VECTOR; 1078 value |= SPURIOUS_APIC_VECTOR;
909 apic_write(APIC_SPIV, value); 1079 apic_write(APIC_SPIV, value);
@@ -920,39 +1090,43 @@ void __init init_bsp_APIC(void)
920 1090
921static void __cpuinit lapic_setup_esr(void) 1091static void __cpuinit lapic_setup_esr(void)
922{ 1092{
923 unsigned long oldvalue, value, maxlvt; 1093 unsigned int oldvalue, value, maxlvt;
924 if (lapic_is_integrated() && !esr_disable) { 1094
925 /* !82489DX */ 1095 if (!lapic_is_integrated()) {
926 maxlvt = lapic_get_maxlvt(); 1096 printk(KERN_INFO "No ESR for 82489DX.\n");
927 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 1097 return;
928 apic_write(APIC_ESR, 0); 1098 }
929 oldvalue = apic_read(APIC_ESR);
930 1099
931 /* enables sending errors */ 1100 if (esr_disable) {
932 value = ERROR_APIC_VECTOR;
933 apic_write(APIC_LVTERR, value);
934 /* 1101 /*
935 * spec says clear errors after enabling vector. 1102 * Something untraceable is creating bad interrupts on
1103 * secondary quads ... for the moment, just leave the
1104 * ESR disabled - we can't do anything useful with the
1105 * errors anyway - mbligh
936 */ 1106 */
937 if (maxlvt > 3) 1107 printk(KERN_INFO "Leaving ESR disabled.\n");
938 apic_write(APIC_ESR, 0); 1108 return;
939 value = apic_read(APIC_ESR);
940 if (value != oldvalue)
941 apic_printk(APIC_VERBOSE, "ESR value before enabling "
942 "vector: 0x%08lx after: 0x%08lx\n",
943 oldvalue, value);
944 } else {
945 if (esr_disable)
946 /*
947 * Something untraceable is creating bad interrupts on
948 * secondary quads ... for the moment, just leave the
949 * ESR disabled - we can't do anything useful with the
950 * errors anyway - mbligh
951 */
952 printk(KERN_INFO "Leaving ESR disabled.\n");
953 else
954 printk(KERN_INFO "No ESR for 82489DX.\n");
955 } 1109 }
1110
1111 maxlvt = lapic_get_maxlvt();
1112 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1113 apic_write(APIC_ESR, 0);
1114 oldvalue = apic_read(APIC_ESR);
1115
1116 /* enables sending errors */
1117 value = ERROR_APIC_VECTOR;
1118 apic_write(APIC_LVTERR, value);
1119
1120 /*
1121 * spec says clear errors after enabling vector.
1122 */
1123 if (maxlvt > 3)
1124 apic_write(APIC_ESR, 0);
1125 value = apic_read(APIC_ESR);
1126 if (value != oldvalue)
1127 apic_printk(APIC_VERBOSE, "ESR value before enabling "
1128 "vector: 0x%08x after: 0x%08x\n",
1129 oldvalue, value);
956} 1130}
957 1131
958 1132
@@ -961,24 +1135,27 @@ static void __cpuinit lapic_setup_esr(void)
961 */ 1135 */
962void __cpuinit setup_local_APIC(void) 1136void __cpuinit setup_local_APIC(void)
963{ 1137{
964 unsigned long value, integrated; 1138 unsigned int value;
965 int i, j; 1139 int i, j;
966 1140
1141#ifdef CONFIG_X86_32
967 /* Pound the ESR really hard over the head with a big hammer - mbligh */ 1142 /* Pound the ESR really hard over the head with a big hammer - mbligh */
968 if (esr_disable) { 1143 if (lapic_is_integrated() && esr_disable) {
969 apic_write(APIC_ESR, 0); 1144 apic_write(APIC_ESR, 0);
970 apic_write(APIC_ESR, 0); 1145 apic_write(APIC_ESR, 0);
971 apic_write(APIC_ESR, 0); 1146 apic_write(APIC_ESR, 0);
972 apic_write(APIC_ESR, 0); 1147 apic_write(APIC_ESR, 0);
973 } 1148 }
1149#endif
974 1150
975 integrated = lapic_is_integrated(); 1151 preempt_disable();
976 1152
977 /* 1153 /*
978 * Double-check whether this APIC is really registered. 1154 * Double-check whether this APIC is really registered.
1155 * This is meaningless in clustered apic mode, so we skip it.
979 */ 1156 */
980 if (!apic_id_registered()) 1157 if (!apic_id_registered())
981 WARN_ON_ONCE(1); 1158 BUG();
982 1159
983 /* 1160 /*
 984 * Intel recommends setting DFR, LDR and TPR before enabling 1161 * Intel recommends setting DFR, LDR and TPR before enabling
@@ -1024,6 +1201,7 @@ void __cpuinit setup_local_APIC(void)
1024 */ 1201 */
1025 value |= APIC_SPIV_APIC_ENABLED; 1202 value |= APIC_SPIV_APIC_ENABLED;
1026 1203
1204#ifdef CONFIG_X86_32
1027 /* 1205 /*
1028 * Some unknown Intel IO/APIC (or APIC) errata is biting us with 1206 * Some unknown Intel IO/APIC (or APIC) errata is biting us with
1029 * certain networking cards. If high frequency interrupts are 1207 * certain networking cards. If high frequency interrupts are
@@ -1044,8 +1222,13 @@ void __cpuinit setup_local_APIC(void)
1044 * See also the comment in end_level_ioapic_irq(). --macro 1222 * See also the comment in end_level_ioapic_irq(). --macro
1045 */ 1223 */
1046 1224
1047 /* Enable focus processor (bit==0) */ 1225 /*
1226 * - enable focus processor (bit==0)
 1227 * - 64bit mode always uses processor focus,
 1228 * so there is no need to set it
1229 */
1048 value &= ~APIC_SPIV_FOCUS_DISABLED; 1230 value &= ~APIC_SPIV_FOCUS_DISABLED;
1231#endif
1049 1232
1050 /* 1233 /*
1051 * Set spurious IRQ vector 1234 * Set spurious IRQ vector
@@ -1082,25 +1265,178 @@ void __cpuinit setup_local_APIC(void)
1082 value = APIC_DM_NMI; 1265 value = APIC_DM_NMI;
1083 else 1266 else
1084 value = APIC_DM_NMI | APIC_LVT_MASKED; 1267 value = APIC_DM_NMI | APIC_LVT_MASKED;
1085 if (!integrated) /* 82489DX */ 1268 if (!lapic_is_integrated()) /* 82489DX */
1086 value |= APIC_LVT_LEVEL_TRIGGER; 1269 value |= APIC_LVT_LEVEL_TRIGGER;
1087 apic_write(APIC_LVT1, value); 1270 apic_write(APIC_LVT1, value);
1271
1272 preempt_enable();
1088} 1273}
1089 1274
1090void __cpuinit end_local_APIC_setup(void) 1275void __cpuinit end_local_APIC_setup(void)
1091{ 1276{
1092 unsigned long value;
1093
1094 lapic_setup_esr(); 1277 lapic_setup_esr();
1095 /* Disable the local apic timer */ 1278
1096 value = apic_read(APIC_LVTT); 1279#ifdef CONFIG_X86_32
1097 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); 1280 {
1098 apic_write(APIC_LVTT, value); 1281 unsigned int value;
1282 /* Disable the local apic timer */
1283 value = apic_read(APIC_LVTT);
1284 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1285 apic_write(APIC_LVTT, value);
1286 }
1287#endif
1099 1288
1100 setup_apic_nmi_watchdog(NULL); 1289 setup_apic_nmi_watchdog(NULL);
1101 apic_pm_activate(); 1290 apic_pm_activate();
1102} 1291}
1103 1292
1293#ifdef HAVE_X2APIC
1294void check_x2apic(void)
1295{
1296 int msr, msr2;
1297
1298 rdmsr(MSR_IA32_APICBASE, msr, msr2);
1299
1300 if (msr & X2APIC_ENABLE) {
1301 printk("x2apic enabled by BIOS, switching to x2apic ops\n");
1302 x2apic_preenabled = x2apic = 1;
1303 apic_ops = &x2apic_ops;
1304 }
1305}
1306
1307void enable_x2apic(void)
1308{
1309 int msr, msr2;
1310
1311 rdmsr(MSR_IA32_APICBASE, msr, msr2);
1312 if (!(msr & X2APIC_ENABLE)) {
1313 printk("Enabling x2apic\n");
1314 wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
1315 }
1316}
1317
1318void enable_IR_x2apic(void)
1319{
1320#ifdef CONFIG_INTR_REMAP
1321 int ret;
1322 unsigned long flags;
1323
1324 if (!cpu_has_x2apic)
1325 return;
1326
1327 if (!x2apic_preenabled && disable_x2apic) {
1328 printk(KERN_INFO
1329 "Skipped enabling x2apic and Interrupt-remapping "
1330 "because of nox2apic\n");
1331 return;
1332 }
1333
1334 if (x2apic_preenabled && disable_x2apic)
1335 panic("Bios already enabled x2apic, can't enforce nox2apic");
1336
1337 if (!x2apic_preenabled && skip_ioapic_setup) {
1338 printk(KERN_INFO
1339 "Skipped enabling x2apic and Interrupt-remapping "
1340 "because of skipping io-apic setup\n");
1341 return;
1342 }
1343
1344 ret = dmar_table_init();
1345 if (ret) {
1346 printk(KERN_INFO
1347 "dmar_table_init() failed with %d:\n", ret);
1348
1349 if (x2apic_preenabled)
1350 panic("x2apic enabled by bios. But IR enabling failed");
1351 else
1352 printk(KERN_INFO
1353 "Not enabling x2apic,Intr-remapping\n");
1354 return;
1355 }
1356
1357 local_irq_save(flags);
1358 mask_8259A();
1359
1360 ret = save_mask_IO_APIC_setup();
1361 if (ret) {
1362 printk(KERN_INFO "Saving IO-APIC state failed: %d\n", ret);
1363 goto end;
1364 }
1365
1366 ret = enable_intr_remapping(1);
1367
1368 if (ret && x2apic_preenabled) {
1369 local_irq_restore(flags);
1370 panic("x2apic enabled by bios. But IR enabling failed");
1371 }
1372
1373 if (ret)
1374 goto end_restore;
1375
1376 if (!x2apic) {
1377 x2apic = 1;
1378 apic_ops = &x2apic_ops;
1379 enable_x2apic();
1380 }
1381
1382end_restore:
1383 if (ret)
1384 /*
1385 * IR enabling failed
1386 */
1387 restore_IO_APIC_setup();
1388 else
1389 reinit_intr_remapped_IO_APIC(x2apic_preenabled);
1390
1391end:
1392 unmask_8259A();
1393 local_irq_restore(flags);
1394
1395 if (!ret) {
1396 if (!x2apic_preenabled)
1397 printk(KERN_INFO
1398 "Enabled x2apic and interrupt-remapping\n");
1399 else
1400 printk(KERN_INFO
1401 "Enabled Interrupt-remapping\n");
1402 } else
1403 printk(KERN_ERR
1404 "Failed to enable Interrupt-remapping and x2apic\n");
1405#else
1406 if (!cpu_has_x2apic)
1407 return;
1408
1409 if (x2apic_preenabled)
1410 panic("x2apic enabled prior OS handover,"
1411 " enable CONFIG_INTR_REMAP");
1412
1413 printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
1414 " and x2apic\n");
1415#endif
1416
1417 return;
1418}
1419#endif /* HAVE_X2APIC */
1420
1421#ifdef CONFIG_X86_64
1422/*
1423 * Detect and enable local APICs on non-SMP boards.
1424 * Original code written by Keir Fraser.
1425 * On AMD64 we trust the BIOS - if it says no APIC it is likely
1426 * not correctly set up (usually the APIC timer won't work etc.)
1427 */
1428static int __init detect_init_APIC(void)
1429{
1430 if (!cpu_has_apic) {
1431 printk(KERN_INFO "No local APIC present\n");
1432 return -1;
1433 }
1434
1435 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1436 boot_cpu_physical_apicid = 0;
1437 return 0;
1438}
1439#else
1104/* 1440/*
1105 * Detect and initialize APIC 1441 * Detect and initialize APIC
1106 */ 1442 */
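check_x2apic() and enable_x2apic() in the hunk above key off
IA32_APIC_BASE (MSR 0x1b). A sketch of the bits involved and the
pre-enabled test, using the SDM-documented bit positions (the DEMO_*
names are hypothetical; the kernel's own macros live in the asm
headers):

	#include <linux/types.h>
	#include <asm/msr.h>

	#define DEMO_MSR_APICBASE	0x1b
	#define DEMO_APICBASE_BSP	(1ULL << 8)  /* this CPU is the BSP */
	#define DEMO_X2APIC_ENABLE	(1ULL << 10) /* x2apic mode active */
	#define DEMO_APICBASE_ENABLE	(1ULL << 11) /* xapic globally on */

	static bool demo_x2apic_preenabled(void)
	{
		u64 msr;

		rdmsrl(DEMO_MSR_APICBASE, msr);
		return (msr & DEMO_X2APIC_ENABLE) != 0;
	}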
@@ -1179,12 +1515,46 @@ no_apic:
1179 printk(KERN_INFO "No local APIC present or hardware disabled\n"); 1515 printk(KERN_INFO "No local APIC present or hardware disabled\n");
1180 return -1; 1516 return -1;
1181} 1517}
1518#endif
1519
1520#ifdef CONFIG_X86_64
1521void __init early_init_lapic_mapping(void)
1522{
1523 unsigned long phys_addr;
1524
1525 /*
1526 * If no local APIC can be found then go out
1527 * : it means there is no mpatable and MADT
1528 */
1529 if (!smp_found_config)
1530 return;
1531
1532 phys_addr = mp_lapic_addr;
1533
1534 set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
1535 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
1536 APIC_BASE, phys_addr);
1537
1538 /*
1539 * Fetch the APIC ID of the BSP in case we have a
1540 * default configuration (or the MP table is broken).
1541 */
1542 boot_cpu_physical_apicid = read_apic_id();
1543}
1544#endif
1182 1545
1183/** 1546/**
1184 * init_apic_mappings - initialize APIC mappings 1547 * init_apic_mappings - initialize APIC mappings
1185 */ 1548 */
1186void __init init_apic_mappings(void) 1549void __init init_apic_mappings(void)
1187{ 1550{
1551#ifdef HAVE_X2APIC
1552 if (x2apic) {
1553 boot_cpu_physical_apicid = read_apic_id();
1554 return;
1555 }
1556#endif
1557
1188 /* 1558 /*
1189 * If no local APIC can be found then set up a fake all 1559 * If no local APIC can be found then set up a fake all
1190 * zeroes page to simulate the local APIC and another 1560 * zeroes page to simulate the local APIC and another
@@ -1197,27 +1567,36 @@ void __init init_apic_mappings(void)
1197 apic_phys = mp_lapic_addr; 1567 apic_phys = mp_lapic_addr;
1198 1568
1199 set_fixmap_nocache(FIX_APIC_BASE, apic_phys); 1569 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
1200 printk(KERN_DEBUG "mapped APIC to %08lx (%08lx)\n", APIC_BASE, 1570 apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
1201 apic_phys); 1571 APIC_BASE, apic_phys);
1202 1572
1203 /* 1573 /*
1204 * Fetch the APIC ID of the BSP in case we have a 1574 * Fetch the APIC ID of the BSP in case we have a
1205 * default configuration (or the MP table is broken). 1575 * default configuration (or the MP table is broken).
1206 */ 1576 */
1207 if (boot_cpu_physical_apicid == -1U) 1577 if (boot_cpu_physical_apicid == -1U)
1208 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); 1578 boot_cpu_physical_apicid = read_apic_id();
1209
1210} 1579}
1211 1580
1212/* 1581/*
1213 * This initializes the IO-APIC and APIC hardware if this is 1582 * This initializes the IO-APIC and APIC hardware if this is
1214 * a UP kernel. 1583 * a UP kernel.
1215 */ 1584 */
1216
1217int apic_version[MAX_APICS]; 1585int apic_version[MAX_APICS];
1218 1586
1219int __init APIC_init_uniprocessor(void) 1587int __init APIC_init_uniprocessor(void)
1220{ 1588{
1589#ifdef CONFIG_X86_64
1590 if (disable_apic) {
1591 printk(KERN_INFO "Apic disabled\n");
1592 return -1;
1593 }
1594 if (!cpu_has_apic) {
1595 disable_apic = 1;
1596 printk(KERN_INFO "Apic disabled by BIOS\n");
1597 return -1;
1598 }
1599#else
1221 if (!smp_found_config && !cpu_has_apic) 1600 if (!smp_found_config && !cpu_has_apic)
1222 return -1; 1601 return -1;
1223 1602
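init_apic_mappings() above pins the LAPIC register page with
set_fixmap_nocache(), after which every xapic register is a 32-bit slot
at a fixed offset inside that one uncached virtual page. A sketch of the
resulting accessor, assuming the fixmap shown in the hunk (the demo name
is hypothetical):

	#include <linux/types.h>
	#include <asm/fixmap.h>

	static inline u32 demo_apic_mem_read(u32 reg)
	{
		/* FIX_APIC_BASE maps the 4K register window uncached */
		return *((volatile u32 *)(fix_to_virt(FIX_APIC_BASE) + reg));
	}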
@@ -1226,39 +1605,68 @@ int __init APIC_init_uniprocessor(void)
1226 */ 1605 */
1227 if (!cpu_has_apic && 1606 if (!cpu_has_apic &&
1228 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { 1607 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1229 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", 1608 printk(KERN_ERR "BIOS bug, local APIC 0x%x not detected!...\n",
1230 boot_cpu_physical_apicid); 1609 boot_cpu_physical_apicid);
1231 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); 1610 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1232 return -1; 1611 return -1;
1233 } 1612 }
1613#endif
1234 1614
1235 verify_local_APIC(); 1615#ifdef HAVE_X2APIC
1616 enable_IR_x2apic();
1617#endif
1618#ifdef CONFIG_X86_64
1619 setup_apic_routing();
1620#endif
1236 1621
1622 verify_local_APIC();
1237 connect_bsp_APIC(); 1623 connect_bsp_APIC();
1238 1624
1625#ifdef CONFIG_X86_64
1626 apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
1627#else
1239 /* 1628 /*
1240 * Hack: In case of kdump, after a crash, kernel might be booting 1629 * Hack: In case of kdump, after a crash, kernel might be booting
1241 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid 1630 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
1242 * might be zero if read from MP tables. Get it from LAPIC. 1631 * might be zero if read from MP tables. Get it from LAPIC.
1243 */ 1632 */
1244#ifdef CONFIG_CRASH_DUMP 1633# ifdef CONFIG_CRASH_DUMP
1245 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); 1634 boot_cpu_physical_apicid = read_apic_id();
1635# endif
1246#endif 1636#endif
1247 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); 1637 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1248
1249 setup_local_APIC(); 1638 setup_local_APIC();
1250 1639
1640#ifdef CONFIG_X86_64
1641 /*
1642 * Now enable IO-APICs, actually call clear_IO_APIC
1643 * We need clear_IO_APIC before enabling vector on BP
1644 */
1645 if (!skip_ioapic_setup && nr_ioapics)
1646 enable_IO_APIC();
1647#endif
1648
1251#ifdef CONFIG_X86_IO_APIC 1649#ifdef CONFIG_X86_IO_APIC
1252 if (!smp_found_config || skip_ioapic_setup || !nr_ioapics) 1650 if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
1253#endif 1651#endif
1254 localise_nmi_watchdog(); 1652 localise_nmi_watchdog();
1255 end_local_APIC_setup(); 1653 end_local_APIC_setup();
1654
1256#ifdef CONFIG_X86_IO_APIC 1655#ifdef CONFIG_X86_IO_APIC
1257 if (smp_found_config) 1656 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1258 if (!skip_ioapic_setup && nr_ioapics) 1657 setup_IO_APIC();
1259 setup_IO_APIC(); 1658# ifdef CONFIG_X86_64
1659 else
1660 nr_ioapics = 0;
1661# endif
1260#endif 1662#endif
1663
1664#ifdef CONFIG_X86_64
1665 setup_boot_APIC_clock();
1666 check_nmi_watchdog();
1667#else
1261 setup_boot_clock(); 1668 setup_boot_clock();
1669#endif
1262 1670
1263 return 0; 1671 return 0;
1264} 1672}
@@ -1272,8 +1680,11 @@ int __init APIC_init_uniprocessor(void)
1272 */ 1680 */
1273void smp_spurious_interrupt(struct pt_regs *regs) 1681void smp_spurious_interrupt(struct pt_regs *regs)
1274{ 1682{
1275 unsigned long v; 1683 u32 v;
1276 1684
1685#ifdef CONFIG_X86_64
1686 exit_idle();
1687#endif
1277 irq_enter(); 1688 irq_enter();
1278 /* 1689 /*
1279 * Check if this really is a spurious interrupt and ACK it 1690 * Check if this really is a spurious interrupt and ACK it
@@ -1284,10 +1695,14 @@ void smp_spurious_interrupt(struct pt_regs *regs)
1284 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) 1695 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1285 ack_APIC_irq(); 1696 ack_APIC_irq();
1286 1697
1698#ifdef CONFIG_X86_64
1699 add_pda(irq_spurious_count, 1);
1700#else
1287 /* see sw-dev-man vol 3, chapter 7.4.13.5 */ 1701 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
1288 printk(KERN_INFO "spurious APIC interrupt on CPU#%d, " 1702 printk(KERN_INFO "spurious APIC interrupt on CPU#%d, "
1289 "should never happen.\n", smp_processor_id()); 1703 "should never happen.\n", smp_processor_id());
1290 __get_cpu_var(irq_stat).irq_spurious_count++; 1704 __get_cpu_var(irq_stat).irq_spurious_count++;
1705#endif
1291 irq_exit(); 1706 irq_exit();
1292} 1707}
1293 1708
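The in-service check in smp_spurious_interrupt() above relies on the
xAPIC register layout: 256 vectors map into eight 32-bit ISR words
starting at offset 0x100 and spaced 0x10 apart, so the word offset is
0x100 + ((vector & ~0x1f) >> 1) and the bit inside it is vector & 0x1f.
A standalone check of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int vec = 0xff;	/* SPURIOUS_APIC_VECTOR */
		unsigned int reg = 0x100 + ((vec & ~0x1fU) >> 1);
		unsigned int bit = vec & 0x1f;

		/* prints: vector 0xff -> ISR register 0x170, bit 31 */
		printf("vector 0x%02x -> ISR register 0x%03x, bit %u\n",
		       vec, reg, bit);
		return 0;
	}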
@@ -1296,8 +1711,11 @@ void smp_spurious_interrupt(struct pt_regs *regs)
1296 */ 1711 */
1297void smp_error_interrupt(struct pt_regs *regs) 1712void smp_error_interrupt(struct pt_regs *regs)
1298{ 1713{
1299 unsigned long v, v1; 1714 u32 v, v1;
1300 1715
1716#ifdef CONFIG_X86_64
1717 exit_idle();
1718#endif
1301 irq_enter(); 1719 irq_enter();
1302 /* First tickle the hardware, only then report what went on. -- REW */ 1720 /* First tickle the hardware, only then report what went on. -- REW */
1303 v = apic_read(APIC_ESR); 1721 v = apic_read(APIC_ESR);
@@ -1316,64 +1734,17 @@ void smp_error_interrupt(struct pt_regs *regs)
1316 6: Received illegal vector 1734 6: Received illegal vector
1317 7: Illegal register address 1735 7: Illegal register address
1318 */ 1736 */
1319 printk(KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n", 1737 printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
 1320 smp_processor_id(), v, v1); 1738 smp_processor_id(), v, v1);
1321 irq_exit(); 1739 irq_exit();
1322} 1740}
1323 1741
1324#ifdef CONFIG_SMP
1325void __init smp_intr_init(void)
1326{
1327 /*
1328 * IRQ0 must be given a fixed assignment and initialized,
1329 * because it's used before the IO-APIC is set up.
1330 */
1331 set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
1332
1333 /*
1334 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
1335 * IPI, driven by wakeup.
1336 */
1337 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
1338
1339 /* IPI for invalidation */
1340 alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
1341
1342 /* IPI for generic function call */
1343 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
1344
1345 /* IPI for single call function */
1346 set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
1347 call_function_single_interrupt);
1348}
1349#endif
1350
1351/*
1352 * Initialize APIC interrupts
1353 */
1354void __init apic_intr_init(void)
1355{
1356#ifdef CONFIG_SMP
1357 smp_intr_init();
1358#endif
1359 /* self generated IPI for local APIC timer */
1360 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
1361
1362 /* IPI vectors for APIC spurious and error interrupts */
1363 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
1364 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
1365
1366 /* thermal monitor LVT interrupt */
1367#ifdef CONFIG_X86_MCE_P4THERMAL
1368 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
1369#endif
1370}
1371
1372/** 1742/**
1373 * connect_bsp_APIC - attach the APIC to the interrupt system 1743 * connect_bsp_APIC - attach the APIC to the interrupt system
1374 */ 1744 */
1375void __init connect_bsp_APIC(void) 1745void __init connect_bsp_APIC(void)
1376{ 1746{
1747#ifdef CONFIG_X86_32
1377 if (pic_mode) { 1748 if (pic_mode) {
1378 /* 1749 /*
1379 * Do not trust the local APIC being empty at bootup. 1750 * Do not trust the local APIC being empty at bootup.
@@ -1388,6 +1759,7 @@ void __init connect_bsp_APIC(void)
1388 outb(0x70, 0x22); 1759 outb(0x70, 0x22);
1389 outb(0x01, 0x23); 1760 outb(0x01, 0x23);
1390 } 1761 }
1762#endif
1391 enable_apic_mode(); 1763 enable_apic_mode();
1392} 1764}
1393 1765
@@ -1400,6 +1772,9 @@ void __init connect_bsp_APIC(void)
1400 */ 1772 */
1401void disconnect_bsp_APIC(int virt_wire_setup) 1773void disconnect_bsp_APIC(int virt_wire_setup)
1402{ 1774{
1775 unsigned int value;
1776
1777#ifdef CONFIG_X86_32
1403 if (pic_mode) { 1778 if (pic_mode) {
1404 /* 1779 /*
1405 * Put the board back into PIC mode (has an effect only on 1780 * Put the board back into PIC mode (has an effect only on
@@ -1411,54 +1786,53 @@ void disconnect_bsp_APIC(int virt_wire_setup)
1411 "entering PIC mode.\n"); 1786 "entering PIC mode.\n");
1412 outb(0x70, 0x22); 1787 outb(0x70, 0x22);
1413 outb(0x00, 0x23); 1788 outb(0x00, 0x23);
1414 } else { 1789 return;
1415 /* Go back to Virtual Wire compatibility mode */ 1790 }
1416 unsigned long value; 1791#endif
1417 1792
1418 /* For the spurious interrupt use vector F, and enable it */ 1793 /* Go back to Virtual Wire compatibility mode */
1419 value = apic_read(APIC_SPIV); 1794
1420 value &= ~APIC_VECTOR_MASK; 1795 /* For the spurious interrupt use vector F, and enable it */
1421 value |= APIC_SPIV_APIC_ENABLED; 1796 value = apic_read(APIC_SPIV);
1422 value |= 0xf; 1797 value &= ~APIC_VECTOR_MASK;
1423 apic_write(APIC_SPIV, value); 1798 value |= APIC_SPIV_APIC_ENABLED;
1424 1799 value |= 0xf;
1425 if (!virt_wire_setup) { 1800 apic_write(APIC_SPIV, value);
1426 /*
1427 * For LVT0 make it edge triggered, active high,
1428 * external and enabled
1429 */
1430 value = apic_read(APIC_LVT0);
1431 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1432 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1433 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1434 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1435 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
1436 apic_write(APIC_LVT0, value);
1437 } else {
1438 /* Disable LVT0 */
1439 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1440 }
1441 1801
1802 if (!virt_wire_setup) {
1442 /* 1803 /*
1443 * For LVT1 make it edge triggered, active high, nmi and 1804 * For LVT0 make it edge triggered, active high,
1444 * enabled 1805 * external and enabled
1445 */ 1806 */
1446 value = apic_read(APIC_LVT1); 1807 value = apic_read(APIC_LVT0);
1447 value &= ~( 1808 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1448 APIC_MODE_MASK | APIC_SEND_PENDING |
1449 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | 1809 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1450 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); 1810 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1451 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; 1811 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1452 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); 1812 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
1453 apic_write(APIC_LVT1, value); 1813 apic_write(APIC_LVT0, value);
1814 } else {
1815 /* Disable LVT0 */
1816 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1454 } 1817 }
1818
1819 /*
1820 * For LVT1 make it edge triggered, active high,
1821 * nmi and enabled
1822 */
1823 value = apic_read(APIC_LVT1);
1824 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1825 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1826 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1827 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1828 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
1829 apic_write(APIC_LVT1, value);
1455} 1830}
1456 1831
1457void __cpuinit generic_processor_info(int apicid, int version) 1832void __cpuinit generic_processor_info(int apicid, int version)
1458{ 1833{
1459 int cpu; 1834 int cpu;
1460 cpumask_t tmp_map; 1835 cpumask_t tmp_map;
1461 physid_mask_t phys_cpu;
1462 1836
1463 /* 1837 /*
1464 * Validate version 1838 * Validate version
@@ -1471,9 +1845,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
1471 } 1845 }
1472 apic_version[apicid] = version; 1846 apic_version[apicid] = version;
1473 1847
1474 phys_cpu = apicid_to_cpu_present(apicid);
1475 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
1476
1477 if (num_processors >= NR_CPUS) { 1848 if (num_processors >= NR_CPUS) {
1478 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." 1849 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
1479 " Processor ignored.\n", NR_CPUS); 1850 " Processor ignored.\n", NR_CPUS);
@@ -1484,17 +1855,19 @@ void __cpuinit generic_processor_info(int apicid, int version)
1484 cpus_complement(tmp_map, cpu_present_map); 1855 cpus_complement(tmp_map, cpu_present_map);
1485 cpu = first_cpu(tmp_map); 1856 cpu = first_cpu(tmp_map);
1486 1857
1487 if (apicid == boot_cpu_physical_apicid) 1858 physid_set(apicid, phys_cpu_present_map);
1859 if (apicid == boot_cpu_physical_apicid) {
1488 /* 1860 /*
1489 * x86_bios_cpu_apicid is required to have processors listed 1861 * x86_bios_cpu_apicid is required to have processors listed
1490 * in same order as logical cpu numbers. Hence the first 1862 * in same order as logical cpu numbers. Hence the first
1491 * entry is BSP, and so on. 1863 * entry is BSP, and so on.
1492 */ 1864 */
1493 cpu = 0; 1865 cpu = 0;
1494 1866 }
1495 if (apicid > max_physical_apicid) 1867 if (apicid > max_physical_apicid)
1496 max_physical_apicid = apicid; 1868 max_physical_apicid = apicid;
1497 1869
1870#ifdef CONFIG_X86_32
1498 /* 1871 /*
1499 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y 1872 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
1500 * but we need to work other dependencies like SMP_SUSPEND etc 1873 * but we need to work other dependencies like SMP_SUSPEND etc
@@ -1514,7 +1887,9 @@ void __cpuinit generic_processor_info(int apicid, int version)
1514 def_to_bigsmp = 1; 1887 def_to_bigsmp = 1;
1515 } 1888 }
1516 } 1889 }
1517#ifdef CONFIG_SMP 1890#endif
1891
1892#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
1518 /* are we being called early in kernel startup? */ 1893 /* are we being called early in kernel startup? */
1519 if (early_per_cpu_ptr(x86_cpu_to_apicid)) { 1894 if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
1520 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); 1895 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
@@ -1527,16 +1902,29 @@ void __cpuinit generic_processor_info(int apicid, int version)
1527 per_cpu(x86_bios_cpu_apicid, cpu) = apicid; 1902 per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1528 } 1903 }
1529#endif 1904#endif
1905
1530 cpu_set(cpu, cpu_possible_map); 1906 cpu_set(cpu, cpu_possible_map);
1531 cpu_set(cpu, cpu_present_map); 1907 cpu_set(cpu, cpu_present_map);
1532} 1908}
1533 1909
1910#ifdef CONFIG_X86_64
1911int hard_smp_processor_id(void)
1912{
1913 return read_apic_id();
1914}
1915#endif
1916
1534/* 1917/*
1535 * Power management 1918 * Power management
1536 */ 1919 */
1537#ifdef CONFIG_PM 1920#ifdef CONFIG_PM
1538 1921
1539static struct { 1922static struct {
1923 /*
1924 * 'active' is true if the local APIC was enabled by us and
1925 * not the BIOS; this signifies that we are also responsible
1926 * for disabling it before entering apm/acpi suspend
1927 */
1540 int active; 1928 int active;
1541 /* r/w apic fields */ 1929 /* r/w apic fields */
1542 unsigned int apic_id; 1930 unsigned int apic_id;
@@ -1577,7 +1965,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
1577 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); 1965 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
1578 apic_pm_state.apic_tmict = apic_read(APIC_TMICT); 1966 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
1579 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); 1967 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
1580#ifdef CONFIG_X86_MCE_P4THERMAL 1968#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
1581 if (maxlvt >= 5) 1969 if (maxlvt >= 5)
1582 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); 1970 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
1583#endif 1971#endif
@@ -1601,16 +1989,23 @@ static int lapic_resume(struct sys_device *dev)
1601 1989
1602 local_irq_save(flags); 1990 local_irq_save(flags);
1603 1991
1604 /* 1992#ifdef HAVE_X2APIC
1605 * Make sure the APICBASE points to the right address 1993 if (x2apic)
1606 * 1994 enable_x2apic();
1607 * FIXME! This will be wrong if we ever support suspend on 1995 else
1608 * SMP! We'll need to do this as part of the CPU restore! 1996#endif
1609 */ 1997 {
1610 rdmsr(MSR_IA32_APICBASE, l, h); 1998 /*
1611 l &= ~MSR_IA32_APICBASE_BASE; 1999 * Make sure the APICBASE points to the right address
1612 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; 2000 *
1613 wrmsr(MSR_IA32_APICBASE, l, h); 2001 * FIXME! This will be wrong if we ever support suspend on
2002 * SMP! We'll need to do this as part of the CPU restore!
2003 */
2004 rdmsr(MSR_IA32_APICBASE, l, h);
2005 l &= ~MSR_IA32_APICBASE_BASE;
2006 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
2007 wrmsr(MSR_IA32_APICBASE, l, h);
2008 }
1614 2009
1615 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); 2010 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
1616 apic_write(APIC_ID, apic_pm_state.apic_id); 2011 apic_write(APIC_ID, apic_pm_state.apic_id);
@@ -1620,7 +2015,7 @@ static int lapic_resume(struct sys_device *dev)
1620 apic_write(APIC_SPIV, apic_pm_state.apic_spiv); 2015 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
1621 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); 2016 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
1622 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); 2017 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
1623#ifdef CONFIG_X86_MCE_P4THERMAL 2018#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
1624 if (maxlvt >= 5) 2019 if (maxlvt >= 5)
1625 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); 2020 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
1626#endif 2021#endif
@@ -1634,7 +2029,9 @@ static int lapic_resume(struct sys_device *dev)
1634 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr); 2029 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
1635 apic_write(APIC_ESR, 0); 2030 apic_write(APIC_ESR, 0);
1636 apic_read(APIC_ESR); 2031 apic_read(APIC_ESR);
2032
1637 local_irq_restore(flags); 2033 local_irq_restore(flags);
2034
1638 return 0; 2035 return 0;
1639} 2036}
1640 2037
@@ -1654,7 +2051,7 @@ static struct sys_device device_lapic = {
1654 .cls = &lapic_sysclass, 2051 .cls = &lapic_sysclass,
1655}; 2052};
1656 2053
1657static void __devinit apic_pm_activate(void) 2054static void __cpuinit apic_pm_activate(void)
1658{ 2055{
1659 apic_pm_state.active = 1; 2056 apic_pm_state.active = 1;
1660} 2057}
@@ -1680,30 +2077,101 @@ static void apic_pm_activate(void) { }
1680 2077
1681#endif /* CONFIG_PM */ 2078#endif /* CONFIG_PM */
1682 2079
2080#ifdef CONFIG_X86_64
1683/* 2081/*
1684 * APIC command line parameters 2082 * apic_is_clustered_box() -- Check if we can expect good TSC
2083 *
2084 * Thus far, the major user of this is IBM's Summit2 series:
2085 *
2086 * Clustered boxes may have unsynced TSC problems if they are
2087 * multi-chassis. Use available data to take a good guess.
2088 * If in doubt, go HPET.
1685 */ 2089 */
1686static int __init parse_lapic(char *arg) 2090__cpuinit int apic_is_clustered_box(void)
1687{ 2091{
1688 force_enable_local_apic = 1; 2092 int i, clusters, zeros;
1689 return 0; 2093 unsigned id;
2094 u16 *bios_cpu_apicid;
2095 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
2096
2097 /*
2098 * There are no boxes of this kind with AMD CPUs yet.
2099 * Some AMD boxes with quad-core CPUs and 8 sockets will have
2100 * APIC IDs of [4, 0x23] or [8, 0x27] and could be mistaken for
2101 * vSMP boxes, so this still needs checking...
2102 */
2103 if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
2104 return 0;
2105
2106 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
2107 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
2108
2109 for (i = 0; i < NR_CPUS; i++) {
2110 /* are we being called early in kernel startup? */
2111 if (bios_cpu_apicid) {
2112 id = bios_cpu_apicid[i];
2113 }
2114 else if (i < nr_cpu_ids) {
2115 if (cpu_present(i))
2116 id = per_cpu(x86_bios_cpu_apicid, i);
2117 else
2118 continue;
2119 }
2120 else
2121 break;
2122
2123 if (id != BAD_APICID)
2124 __set_bit(APIC_CLUSTERID(id), clustermap);
2125 }
2126
2127 /* Problem: Partially populated chassis may not have CPUs in some of
2128 * the APIC clusters they have been allocated. Only present CPUs have
2129 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
2130 * Since clusters are allocated sequentially, count zeros only if
2131 * they are bounded by ones.
2132 */
2133 clusters = 0;
2134 zeros = 0;
2135 for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
2136 if (test_bit(i, clustermap)) {
2137 clusters += 1 + zeros;
2138 zeros = 0;
2139 } else
2140 ++zeros;
2141 }
2142
2143 /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
2144 * not guaranteed to be synced between boards
2145 */
2146 if (is_vsmp_box() && clusters > 1)
2147 return 1;
2148
2149 /*
2150 * If clusters > 2, then it should be multi-chassis.
2151 * May have to revisit this when multi-core + hyperthreaded CPUs come
2152 * out, but AFAIK this will work even for them.
2153 */
2154 return (clusters > 2);
1690} 2155}
1691early_param("lapic", parse_lapic); 2156#endif
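The bounded-zero counting above can be checked with a small standalone sketch (hypothetical 4-cluster map; illustrative only, not part of the patch):

/* User-space sketch of the counting loop in apic_is_clustered_box(). */
#include <stdio.h>

int main(void)
{
	/* hypothetical map: clusters 0, 1 and 3 populated, 2 empty */
	int clustermap[4] = { 1, 1, 0, 1 };
	int i, clusters = 0, zeros = 0;

	for (i = 0; i < 4; i++) {
		if (clustermap[i]) {
			clusters += 1 + zeros;	/* count bounded zeros too */
			zeros = 0;
		} else
			++zeros;
	}
	/* prints 4: empty cluster 2 is counted because ones bound it;
	 * a trailing empty cluster would not be counted */
	printf("clusters = %d\n", clusters);
	return 0;
}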
1692 2157
1693static int __init parse_nolapic(char *arg) 2158/*
2159 * APIC command line parameters
2160 */
2161static int __init setup_disableapic(char *arg)
1694{ 2162{
1695 disable_apic = 1; 2163 disable_apic = 1;
1696 setup_clear_cpu_cap(X86_FEATURE_APIC); 2164 setup_clear_cpu_cap(X86_FEATURE_APIC);
1697 return 0; 2165 return 0;
1698} 2166}
1699early_param("nolapic", parse_nolapic); 2167early_param("disableapic", setup_disableapic);
1700 2168
1701static int __init parse_disable_lapic_timer(char *arg) 2169/* same as disableapic, for compatibility */
2170static int __init setup_nolapic(char *arg)
1702{ 2171{
1703 local_apic_timer_disabled = 1; 2172 return setup_disableapic(arg);
1704 return 0;
1705} 2173}
1706early_param("nolapic_timer", parse_disable_lapic_timer); 2174early_param("nolapic", setup_nolapic);
1707 2175
1708static int __init parse_lapic_timer_c2_ok(char *arg) 2176static int __init parse_lapic_timer_c2_ok(char *arg)
1709{ 2177{
@@ -1712,15 +2180,39 @@ static int __init parse_lapic_timer_c2_ok(char *arg)
1712} 2180}
1713early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); 2181early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1714 2182
2183static int __init parse_disable_apic_timer(char *arg)
2184{
2185 disable_apic_timer = 1;
2186 return 0;
2187}
2188early_param("noapictimer", parse_disable_apic_timer);
2189
2190static int __init parse_nolapic_timer(char *arg)
2191{
2192 disable_apic_timer = 1;
2193 return 0;
2194}
2195early_param("nolapic_timer", parse_nolapic_timer);
2196
1715static int __init apic_set_verbosity(char *arg) 2197static int __init apic_set_verbosity(char *arg)
1716{ 2198{
1717 if (!arg) 2199 if (!arg) {
2200#ifdef CONFIG_X86_64
2201 skip_ioapic_setup = 0;
2202 return 0;
2203#endif
1718 return -EINVAL; 2204 return -EINVAL;
2205 }
1719 2206
1720 if (strcmp(arg, "debug") == 0) 2207 if (strcmp("debug", arg) == 0)
1721 apic_verbosity = APIC_DEBUG; 2208 apic_verbosity = APIC_DEBUG;
1722 else if (strcmp(arg, "verbose") == 0) 2209 else if (strcmp("verbose", arg) == 0)
1723 apic_verbosity = APIC_VERBOSE; 2210 apic_verbosity = APIC_VERBOSE;
2211 else {
2212 printk(KERN_WARNING "APIC Verbosity level %s not recognised,"
2213 " use apic=verbose or apic=debug\n", arg);
2214 return -EINVAL;
2215 }
1724 2216
1725 return 0; 2217 return 0;
1726} 2218}
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
deleted file mode 100644
index 446c062e831c..000000000000
--- a/arch/x86/kernel/apic_64.c
+++ /dev/null
@@ -1,1390 +0,0 @@
1/*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17#include <linux/init.h>
18
19#include <linux/mm.h>
20#include <linux/delay.h>
21#include <linux/bootmem.h>
22#include <linux/interrupt.h>
23#include <linux/mc146818rtc.h>
24#include <linux/kernel_stat.h>
25#include <linux/sysdev.h>
26#include <linux/ioport.h>
27#include <linux/clockchips.h>
28#include <linux/acpi_pmtmr.h>
29#include <linux/module.h>
30
31#include <asm/atomic.h>
32#include <asm/smp.h>
33#include <asm/mtrr.h>
34#include <asm/mpspec.h>
35#include <asm/hpet.h>
36#include <asm/pgalloc.h>
37#include <asm/nmi.h>
38#include <asm/idle.h>
39#include <asm/proto.h>
40#include <asm/timex.h>
41#include <asm/apic.h>
42
43#include <mach_ipi.h>
44#include <mach_apic.h>
45
46static int disable_apic_timer __cpuinitdata;
47static int apic_calibrate_pmtmr __initdata;
48int disable_apic;
49
50/* Local APIC timer works in C2 */
51int local_apic_timer_c2_ok;
52EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
53
54/*
55 * Debug level, exported for io_apic.c
56 */
57unsigned int apic_verbosity;
58
59/* Have we found an MP table */
60int smp_found_config;
61
62static struct resource lapic_resource = {
63 .name = "Local APIC",
64 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
65};
66
67static unsigned int calibration_result;
68
69static int lapic_next_event(unsigned long delta,
70 struct clock_event_device *evt);
71static void lapic_timer_setup(enum clock_event_mode mode,
72 struct clock_event_device *evt);
73static void lapic_timer_broadcast(cpumask_t mask);
74static void apic_pm_activate(void);
75
76static struct clock_event_device lapic_clockevent = {
77 .name = "lapic",
78 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
79 | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
80 .shift = 32,
81 .set_mode = lapic_timer_setup,
82 .set_next_event = lapic_next_event,
83 .broadcast = lapic_timer_broadcast,
84 .rating = 100,
85 .irq = -1,
86};
87static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
88
89static unsigned long apic_phys;
90
91unsigned long mp_lapic_addr;
92
93/*
94 * Get the LAPIC version
95 */
96static inline int lapic_get_version(void)
97{
98 return GET_APIC_VERSION(apic_read(APIC_LVR));
99}
100
101/*
102 * Check if the APIC is integrated or a separate chip
103 */
104static inline int lapic_is_integrated(void)
105{
106 return 1;
107}
108
109/*
110 * Check, whether this is a modern or a first generation APIC
111 */
112static int modern_apic(void)
113{
114 /* AMD systems use old APIC versions, so check the CPU */
115 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
116 boot_cpu_data.x86 >= 0xf)
117 return 1;
118 return lapic_get_version() >= 0x14;
119}
120
121void apic_wait_icr_idle(void)
122{
123 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
124 cpu_relax();
125}
126
127u32 safe_apic_wait_icr_idle(void)
128{
129 u32 send_status;
130 int timeout;
131
132 timeout = 0;
133 do {
134 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
135 if (!send_status)
136 break;
137 udelay(100);
138 } while (timeout++ < 1000);
139
140 return send_status;
141}
142
143/**
144 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
145 */
146void __cpuinit enable_NMI_through_LVT0(void)
147{
148 unsigned int v;
149
150 /* unmask and set to NMI */
151 v = APIC_DM_NMI;
152 apic_write(APIC_LVT0, v);
153}
154
155/**
156 * lapic_get_maxlvt - get the maximum number of local vector table entries
157 */
158int lapic_get_maxlvt(void)
159{
160 unsigned int v, maxlvt;
161
162 v = apic_read(APIC_LVR);
163 maxlvt = GET_APIC_MAXLVT(v);
164 return maxlvt;
165}
166
167/*
168 * This function sets up the local APIC timer, with a timeout of
169 * 'clocks' APIC bus clocks. During calibration we actually call
170 * this function twice on the boot CPU, once with a bogus timeout
171 * value, the second time for real. The other (noncalibrating) CPUs
172 * call this function only once, with the real, calibrated value.
173 *
174 * We do reads before writes even if unnecessary, to get around the
175 * P5 APIC double write bug.
176 */
177
178static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
179{
180 unsigned int lvtt_value, tmp_value;
181
182 lvtt_value = LOCAL_TIMER_VECTOR;
183 if (!oneshot)
184 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
185 if (!irqen)
186 lvtt_value |= APIC_LVT_MASKED;
187
188 apic_write(APIC_LVTT, lvtt_value);
189
190 /*
191 * Divide PICLK by 16
192 */
193 tmp_value = apic_read(APIC_TDCR);
194 apic_write(APIC_TDCR, (tmp_value
195 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
196 | APIC_TDR_DIV_16);
197
198 if (!oneshot)
199 apic_write(APIC_TMICT, clocks);
200}
201
202/*
203 * Setup extended LVT, AMD specific (K8, family 10h)
204 *
205 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
206 * MCE interrupts are supported. Thus MCE offset must be set to 0.
207 */
208
209#define APIC_EILVT_LVTOFF_MCE 0
210#define APIC_EILVT_LVTOFF_IBS 1
211
212static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
213{
214 unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
215 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
216
217 apic_write(reg, v);
218}
219
220u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
221{
222 setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
223 return APIC_EILVT_LVTOFF_MCE;
224}
225
226u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
227{
228 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
229 return APIC_EILVT_LVTOFF_IBS;
230}
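A hedged usage sketch for the helpers above; MY_IBS_VECTOR and the APIC_EILVT_MSG_FIX message type are assumptions of this example, not definitions from this file:

/* hypothetical driver claiming the IBS extended-LVT slot */
u8 offset = setup_APIC_eilvt_ibs(MY_IBS_VECTOR, APIC_EILVT_MSG_FIX, 0);
/*
 * offset is APIC_EILVT_LVTOFF_IBS == 1, i.e. the register at
 * APIC_EILVT0 + (1 << 4), one 16-byte slot past APIC500.
 */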
231
232/*
233 * Program the next event, relative to now
234 */
235static int lapic_next_event(unsigned long delta,
236 struct clock_event_device *evt)
237{
238 apic_write(APIC_TMICT, delta);
239 return 0;
240}
241
242/*
243 * Setup the lapic timer in periodic or oneshot mode
244 */
245static void lapic_timer_setup(enum clock_event_mode mode,
246 struct clock_event_device *evt)
247{
248 unsigned long flags;
249 unsigned int v;
250
251 /* Lapic used as a dummy for broadcast? */
252 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
253 return;
254
255 local_irq_save(flags);
256
257 switch (mode) {
258 case CLOCK_EVT_MODE_PERIODIC:
259 case CLOCK_EVT_MODE_ONESHOT:
260 __setup_APIC_LVTT(calibration_result,
261 mode != CLOCK_EVT_MODE_PERIODIC, 1);
262 break;
263 case CLOCK_EVT_MODE_UNUSED:
264 case CLOCK_EVT_MODE_SHUTDOWN:
265 v = apic_read(APIC_LVTT);
266 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
267 apic_write(APIC_LVTT, v);
268 break;
269 case CLOCK_EVT_MODE_RESUME:
270 /* Nothing to do here */
271 break;
272 }
273
274 local_irq_restore(flags);
275}
276
277/*
278 * Local APIC timer broadcast function
279 */
280static void lapic_timer_broadcast(cpumask_t mask)
281{
282#ifdef CONFIG_SMP
283 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
284#endif
285}
286
287/*
288 * Set up the local APIC timer for this CPU. Copy the initialized values
289 * of the boot CPU and register the clock event in the framework.
290 */
291static void setup_APIC_timer(void)
292{
293 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
294
295 memcpy(levt, &lapic_clockevent, sizeof(*levt));
296 levt->cpumask = cpumask_of_cpu(smp_processor_id());
297
298 clockevents_register_device(levt);
299}
300
301/*
302 * In this function we calibrate APIC bus clocks to the external
303 * timer. Unfortunately we cannot use jiffies and the timer irq
304 * to calibrate, since some later bootup code depends on getting
305 * the first irq. Ugh.
306 *
307 * We want to do the calibration only once, since we
308 * want the local timer irqs to be synchronized. CPUs connected
309 * by the same APIC bus have the very same bus frequency.
310 * And we want to have irqs off anyway, no accidental
311 * APIC irq that way.
312 */
313
314#define TICK_COUNT 100000000
315
316static int __init calibrate_APIC_clock(void)
317{
318 unsigned apic, apic_start;
319 unsigned long tsc, tsc_start;
320 int result;
321
322 local_irq_disable();
323
324 /*
325 * Put whatever arbitrary (but long enough) timeout
326 * value into the APIC clock, we just want to get the
327 * counter running for calibration.
328 *
329 * No interrupt enable !
330 */
331 __setup_APIC_LVTT(250000000, 0, 0);
332
333 apic_start = apic_read(APIC_TMCCT);
334#ifdef CONFIG_X86_PM_TIMER
335 if (apic_calibrate_pmtmr && pmtmr_ioport) {
336 pmtimer_wait(5000); /* 5ms wait */
337 apic = apic_read(APIC_TMCCT);
338 result = (apic_start - apic) * 1000L / 5;
339 } else
340#endif
341 {
342 rdtscll(tsc_start);
343
344 do {
345 apic = apic_read(APIC_TMCCT);
346 rdtscll(tsc);
347 } while ((tsc - tsc_start) < TICK_COUNT &&
348 (apic_start - apic) < TICK_COUNT);
349
350 result = (apic_start - apic) * 1000L * tsc_khz /
351 (tsc - tsc_start);
352 }
353
354 local_irq_enable();
355
356 printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
357
358 printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
359 result / 1000 / 1000, result / 1000 % 1000);
360
361 /* Calculate the scaled math multiplication factor */
362 lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
363 lapic_clockevent.shift);
364 lapic_clockevent.max_delta_ns =
365 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
366 lapic_clockevent.min_delta_ns =
367 clockevent_delta2ns(0xF, &lapic_clockevent);
368
369 calibration_result = result / HZ;
370
371 /*
372 * Do a sanity check on the APIC calibration result
373 */
374 if (calibration_result < (1000000 / HZ)) {
375 printk(KERN_WARNING
376 "APIC frequency too slow, disabling apic timer\n");
377 return -1;
378 }
379
380 return 0;
381}
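A worked instance of the TSC branch above, using made-up but plausible numbers:

/*
 * Assume tsc_khz == 2400000 (a 2.4 GHz TSC) and that the APIC count
 * drops by 6,250,000 while the TSC advances by 100,000,000 cycles:
 *
 *   result = 6250000 * 1000L * 2400000 / 100000000 = 150000000 Hz
 *
 * i.e. the APIC timer ticks at 1/16th of the TSC rate here, matching
 * the APIC_TDR_DIV_16 divider programmed in __setup_APIC_LVTT().
 */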
382
383/*
384 * Setup the boot APIC
385 *
386 * Calibrate and verify the result.
387 */
388void __init setup_boot_APIC_clock(void)
389{
390 /*
391 * The local apic timer can be disabled via the kernel commandline.
392 * Register the lapic timer as a dummy clock event source on SMP
393 * systems, so the broadcast mechanism is used. On UP systems simply
394 * ignore it.
395 */
396 if (disable_apic_timer) {
397 printk(KERN_INFO "Disabling APIC timer\n");
398 /* No broadcast on UP ! */
399 if (num_possible_cpus() > 1) {
400 lapic_clockevent.mult = 1;
401 setup_APIC_timer();
402 }
403 return;
404 }
405
406 printk(KERN_INFO "Using local APIC timer interrupts.\n");
407 if (calibrate_APIC_clock()) {
408 /* No broadcast on UP ! */
409 if (num_possible_cpus() > 1)
410 setup_APIC_timer();
411 return;
412 }
413
414 /*
415 * If nmi_watchdog is set to IO_APIC, we need the
416 * PIT/HPET going. Otherwise register lapic as a dummy
417 * device.
418 */
419 if (nmi_watchdog != NMI_IO_APIC)
420 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
421 else
422 printk(KERN_WARNING "APIC timer registered as dummy,"
423 " due to nmi_watchdog=%d!\n", nmi_watchdog);
424
425 setup_APIC_timer();
426}
427
428void __cpuinit setup_secondary_APIC_clock(void)
429{
430 setup_APIC_timer();
431}
432
433/*
434 * The guts of the apic timer interrupt
435 */
436static void local_apic_timer_interrupt(void)
437{
438 int cpu = smp_processor_id();
439 struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
440
441 /*
442 * Normally we should not be here until the LAPIC has been initialized,
443 * but in some cases, like kdump, it's possible that a pending LAPIC
444 * timer interrupt from the previous kernel's context is delivered in
445 * the new kernel the moment interrupts are enabled.
446 *
447 * Interrupts are enabled early and the LAPIC is set up much later, hence
448 * it's possible that when we get here evt->event_handler is NULL.
449 * Check for event_handler being NULL and discard the interrupt as
450 * spurious.
451 */
452 if (!evt->event_handler) {
453 printk(KERN_WARNING
454 "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
455 /* Switch it off */
456 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
457 return;
458 }
459
460 /*
461 * the NMI deadlock-detector uses this.
462 */
463 add_pda(apic_timer_irqs, 1);
464
465 evt->event_handler(evt);
466}
467
468/*
469 * Local APIC timer interrupt. This is the most natural way for doing
470 * local interrupts, but local timer interrupts can be emulated by
471 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
472 *
473 * [ if a single-CPU system runs an SMP kernel then we call the local
474 * interrupt as well. Thus we cannot inline the local irq ... ]
475 */
476void smp_apic_timer_interrupt(struct pt_regs *regs)
477{
478 struct pt_regs *old_regs = set_irq_regs(regs);
479
480 /*
481 * NOTE! We'd better ACK the irq immediately,
482 * because timer handling can be slow.
483 */
484 ack_APIC_irq();
485 /*
486 * update_process_times() expects us to have done irq_enter().
487 * Besides, if we don't, timer interrupts ignore the global
488 * interrupt lock, which is the WrongThing (tm) to do.
489 */
490 exit_idle();
491 irq_enter();
492 local_apic_timer_interrupt();
493 irq_exit();
494 set_irq_regs(old_regs);
495}
496
497int setup_profiling_timer(unsigned int multiplier)
498{
499 return -EINVAL;
500}
501
502
503/*
504 * Local APIC start and shutdown
505 */
506
507/**
508 * clear_local_APIC - shutdown the local APIC
509 *
510 * This is called when a CPU is disabled and before rebooting, so the state of
511 * the local APIC has no dangling leftovers. Also used to clean out any BIOS
512 * leftovers during boot.
513 */
514void clear_local_APIC(void)
515{
516 int maxlvt;
517 u32 v;
518
519 /* APIC hasn't been mapped yet */
520 if (!apic_phys)
521 return;
522
523 maxlvt = lapic_get_maxlvt();
524 /*
525 * Masking an LVT entry can trigger a local APIC error
526 * if the vector is zero. Mask LVTERR first to prevent this.
527 */
528 if (maxlvt >= 3) {
529 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
530 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
531 }
532 /*
533 * Careful: we have to set masks only first to deassert
534 * any level-triggered sources.
535 */
536 v = apic_read(APIC_LVTT);
537 apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
538 v = apic_read(APIC_LVT0);
539 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
540 v = apic_read(APIC_LVT1);
541 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
542 if (maxlvt >= 4) {
543 v = apic_read(APIC_LVTPC);
544 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
545 }
546
547 /*
548 * Clean APIC state for other OSs:
549 */
550 apic_write(APIC_LVTT, APIC_LVT_MASKED);
551 apic_write(APIC_LVT0, APIC_LVT_MASKED);
552 apic_write(APIC_LVT1, APIC_LVT_MASKED);
553 if (maxlvt >= 3)
554 apic_write(APIC_LVTERR, APIC_LVT_MASKED);
555 if (maxlvt >= 4)
556 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
557 apic_write(APIC_ESR, 0);
558 apic_read(APIC_ESR);
559}
560
561/**
562 * disable_local_APIC - clear and disable the local APIC
563 */
564void disable_local_APIC(void)
565{
566 unsigned int value;
567
568 clear_local_APIC();
569
570 /*
571 * Disable APIC (implies clearing of registers
572 * for 82489DX!).
573 */
574 value = apic_read(APIC_SPIV);
575 value &= ~APIC_SPIV_APIC_ENABLED;
576 apic_write(APIC_SPIV, value);
577}
578
579void lapic_shutdown(void)
580{
581 unsigned long flags;
582
583 if (!cpu_has_apic)
584 return;
585
586 local_irq_save(flags);
587
588 disable_local_APIC();
589
590 local_irq_restore(flags);
591}
592
593/*
594 * This is to verify that we're looking at a real local APIC.
595 * Check these against your board if the CPUs aren't getting
596 * started for no apparent reason.
597 */
598int __init verify_local_APIC(void)
599{
600 unsigned int reg0, reg1;
601
602 /*
603 * The version register is read-only in a real APIC.
604 */
605 reg0 = apic_read(APIC_LVR);
606 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
607 apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
608 reg1 = apic_read(APIC_LVR);
609 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
610
611 /*
612 * The two version reads above should print the same
613 * numbers. If the second one is different, then we
614 * poke at a non-APIC.
615 */
616 if (reg1 != reg0)
617 return 0;
618
619 /*
620 * Check if the version looks reasonable.
621 */
622 reg1 = GET_APIC_VERSION(reg0);
623 if (reg1 == 0x00 || reg1 == 0xff)
624 return 0;
625 reg1 = lapic_get_maxlvt();
626 if (reg1 < 0x02 || reg1 == 0xff)
627 return 0;
628
629 /*
630 * The ID register is read/write in a real APIC.
631 */
632 reg0 = read_apic_id();
633 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
634 apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
635 reg1 = read_apic_id();
636 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
637 apic_write(APIC_ID, reg0);
638 if (reg1 != (reg0 ^ APIC_ID_MASK))
639 return 0;
640
641 /*
642 * The next two are just to see if we have sane values.
643 * They're only really relevant if we're in Virtual Wire
644 * compatibility mode, but most boxes are these days.
645 */
646 reg0 = apic_read(APIC_LVT0);
647 apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
648 reg1 = apic_read(APIC_LVT1);
649 apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
650
651 return 1;
652}
653
654/**
655 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
656 */
657void __init sync_Arb_IDs(void)
658{
659 /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
660 if (modern_apic())
661 return;
662
663 /*
664 * Wait for idle.
665 */
666 apic_wait_icr_idle();
667
668 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
669 apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
670 | APIC_DM_INIT);
671}
672
673/*
674 * An initial setup of the virtual wire mode.
675 */
676void __init init_bsp_APIC(void)
677{
678 unsigned int value;
679
680 /*
681 * Don't do the setup now if we have an SMP BIOS as the
682 * through-I/O-APIC virtual wire mode might be active.
683 */
684 if (smp_found_config || !cpu_has_apic)
685 return;
686
687 value = apic_read(APIC_LVR);
688
689 /*
690 * Do not trust the local APIC being empty at bootup.
691 */
692 clear_local_APIC();
693
694 /*
695 * Enable APIC.
696 */
697 value = apic_read(APIC_SPIV);
698 value &= ~APIC_VECTOR_MASK;
699 value |= APIC_SPIV_APIC_ENABLED;
700 value |= APIC_SPIV_FOCUS_DISABLED;
701 value |= SPURIOUS_APIC_VECTOR;
702 apic_write(APIC_SPIV, value);
703
704 /*
705 * Set up the virtual wire mode.
706 */
707 apic_write(APIC_LVT0, APIC_DM_EXTINT);
708 value = APIC_DM_NMI;
709 apic_write(APIC_LVT1, value);
710}
711
712/**
713 * setup_local_APIC - setup the local APIC
714 */
715void __cpuinit setup_local_APIC(void)
716{
717 unsigned int value;
718 int i, j;
719
720 preempt_disable();
721 value = apic_read(APIC_LVR);
722
723 BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);
724
725 /*
726 * Double-check whether this APIC is really registered.
727 * This is meaningless in clustered apic mode, so we skip it.
728 */
729 if (!apic_id_registered())
730 BUG();
731
732 /*
733 * Intel recommends to set DFR, LDR and TPR before enabling
734 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
735 * document number 292116). So here it goes...
736 */
737 init_apic_ldr();
738
739 /*
740 * Set Task Priority to 'accept all'. We never change this
741 * later on.
742 */
743 value = apic_read(APIC_TASKPRI);
744 value &= ~APIC_TPRI_MASK;
745 apic_write(APIC_TASKPRI, value);
746
747 /*
748 * After a crash, we no longer service the interrupts and a pending
749 * interrupt from previous kernel might still have ISR bit set.
750 *
751 * Most probably by now the CPU has serviced that pending interrupt and
752 * it might not have done the ack_APIC_irq() because it thought the
753 * interrupt came from i8259 as ExtInt. The LAPIC did not get an EOI so it
754 * does not clear the ISR bit and the CPU thinks it has already serviced
755 * the interrupt. Hence a vector might get locked. It was noticed
756 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
757 */
758 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
759 value = apic_read(APIC_ISR + i*0x10);
760 for (j = 31; j >= 0; j--) {
761 if (value & (1<<j))
762 ack_APIC_irq();
763 }
764 }
765
766 /*
767 * Now that we are all set up, enable the APIC
768 */
769 value = apic_read(APIC_SPIV);
770 value &= ~APIC_VECTOR_MASK;
771 /*
772 * Enable APIC
773 */
774 value |= APIC_SPIV_APIC_ENABLED;
775
776 /* We always use processor focus */
777
778 /*
779 * Set spurious IRQ vector
780 */
781 value |= SPURIOUS_APIC_VECTOR;
782 apic_write(APIC_SPIV, value);
783
784 /*
785 * Set up LVT0, LVT1:
786 *
787 * set up through-local-APIC on the BP's LINT0. This is not
788 * strictly necessary in pure symmetric-IO mode, but sometimes
789 * we delegate interrupts to the 8259A.
790 */
791 /*
792 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
793 */
794 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
795 if (!smp_processor_id() && !value) {
796 value = APIC_DM_EXTINT;
797 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
798 smp_processor_id());
799 } else {
800 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
801 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
802 smp_processor_id());
803 }
804 apic_write(APIC_LVT0, value);
805
806 /*
807 * only the BP should see the LINT1 NMI signal, obviously.
808 */
809 if (!smp_processor_id())
810 value = APIC_DM_NMI;
811 else
812 value = APIC_DM_NMI | APIC_LVT_MASKED;
813 apic_write(APIC_LVT1, value);
814 preempt_enable();
815}
816
817static void __cpuinit lapic_setup_esr(void)
818{
819 unsigned maxlvt = lapic_get_maxlvt();
820
821 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR);
822 /*
823 * spec says clear errors after enabling vector.
824 */
825 if (maxlvt > 3)
826 apic_write(APIC_ESR, 0);
827}
828
829void __cpuinit end_local_APIC_setup(void)
830{
831 lapic_setup_esr();
832 setup_apic_nmi_watchdog(NULL);
833 apic_pm_activate();
834}
835
836/*
837 * Detect and enable local APICs on non-SMP boards.
838 * Original code written by Keir Fraser.
839 * On AMD64 we trust the BIOS - if it says no APIC it is likely
840 * not correctly set up (usually the APIC timer won't work etc.)
841 */
842static int __init detect_init_APIC(void)
843{
844 if (!cpu_has_apic) {
845 printk(KERN_INFO "No local APIC present\n");
846 return -1;
847 }
848
849 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
850 boot_cpu_physical_apicid = 0;
851 return 0;
852}
853
854void __init early_init_lapic_mapping(void)
855{
856 unsigned long phys_addr;
857
858 /*
859 * If no local APIC can be found then bail out:
860 * it means there is no MP table and no MADT
861 */
862 if (!smp_found_config)
863 return;
864
865 phys_addr = mp_lapic_addr;
866
867 set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
868 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
869 APIC_BASE, phys_addr);
870
871 /*
872 * Fetch the APIC ID of the BSP in case we have a
873 * default configuration (or the MP table is broken).
874 */
875 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
876}
877
878/**
879 * init_apic_mappings - initialize APIC mappings
880 */
881void __init init_apic_mappings(void)
882{
883 /*
884 * If no local APIC can be found then set up a fake all
885 * zeroes page to simulate the local APIC and another
886 * one for the IO-APIC.
887 */
888 if (!smp_found_config && detect_init_APIC()) {
889 apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
890 apic_phys = __pa(apic_phys);
891 } else
892 apic_phys = mp_lapic_addr;
893
894 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
895 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
896 APIC_BASE, apic_phys);
897
898 /*
899 * Fetch the APIC ID of the BSP in case we have a
900 * default configuration (or the MP table is broken).
901 */
902 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
903}
904
905/*
906 * This initializes the IO-APIC and APIC hardware if this is
907 * a UP kernel.
908 */
909int __init APIC_init_uniprocessor(void)
910{
911 if (disable_apic) {
912 printk(KERN_INFO "Apic disabled\n");
913 return -1;
914 }
915 if (!cpu_has_apic) {
916 disable_apic = 1;
917 printk(KERN_INFO "Apic disabled by BIOS\n");
918 return -1;
919 }
920
921 verify_local_APIC();
922
923 connect_bsp_APIC();
924
925 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
926 apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
927
928 setup_local_APIC();
929
930 /*
931 * Now enable IO-APICs, actually call clear_IO_APIC
932 * We need clear_IO_APIC before enabling vector on BP
933 */
934 if (!skip_ioapic_setup && nr_ioapics)
935 enable_IO_APIC();
936
937 if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
938 localise_nmi_watchdog();
939 end_local_APIC_setup();
940
941 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
942 setup_IO_APIC();
943 else
944 nr_ioapics = 0;
945 setup_boot_APIC_clock();
946 check_nmi_watchdog();
947 return 0;
948}
949
950/*
951 * Local APIC interrupts
952 */
953
954/*
955 * This interrupt should _never_ happen with our APIC/SMP architecture
956 */
957asmlinkage void smp_spurious_interrupt(void)
958{
959 unsigned int v;
960 exit_idle();
961 irq_enter();
962 /*
963 * Check if this really is a spurious interrupt and ACK it
964 * if it is a vectored one. Just in case...
965 * Spurious interrupts should not be ACKed.
966 */
967 v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
968 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
969 ack_APIC_irq();
970
971 add_pda(irq_spurious_count, 1);
972 irq_exit();
973}
974
975/*
976 * This interrupt should never happen with our APIC/SMP architecture
977 */
978asmlinkage void smp_error_interrupt(void)
979{
980 unsigned int v, v1;
981
982 exit_idle();
983 irq_enter();
984 /* First tickle the hardware, only then report what went on. -- REW */
985 v = apic_read(APIC_ESR);
986 apic_write(APIC_ESR, 0);
987 v1 = apic_read(APIC_ESR);
988 ack_APIC_irq();
989 atomic_inc(&irq_err_count);
990
991 /* Here is what the APIC error bits mean:
992 0: Send CS error
993 1: Receive CS error
994 2: Send accept error
995 3: Receive accept error
996 4: Reserved
997 5: Send illegal vector
998 6: Received illegal vector
999 7: Illegal register address
1000 */
1001 printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
1002 smp_processor_id(), v, v1);
1003 irq_exit();
1004}
1005
1006/**
1007 * connect_bsp_APIC - attach the APIC to the interrupt system
1008 */
1009void __init connect_bsp_APIC(void)
1010{
1011 enable_apic_mode();
1012}
1013
1014void disconnect_bsp_APIC(int virt_wire_setup)
1015{
1016 /* Go back to Virtual Wire compatibility mode */
1017 unsigned long value;
1018
1019 /* For the spurious interrupt use vector F, and enable it */
1020 value = apic_read(APIC_SPIV);
1021 value &= ~APIC_VECTOR_MASK;
1022 value |= APIC_SPIV_APIC_ENABLED;
1023 value |= 0xf;
1024 apic_write(APIC_SPIV, value);
1025
1026 if (!virt_wire_setup) {
1027 /*
1028 * For LVT0 make it edge triggered, active high,
1029 * external and enabled
1030 */
1031 value = apic_read(APIC_LVT0);
1032 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1033 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1034 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1035 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1036 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
1037 apic_write(APIC_LVT0, value);
1038 } else {
1039 /* Disable LVT0 */
1040 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1041 }
1042
1043 /* For LVT1 make it edge triggered, active high, nmi and enabled */
1044 value = apic_read(APIC_LVT1);
1045 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1046 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1047 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1048 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1049 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
1050 apic_write(APIC_LVT1, value);
1051}
1052
1053void __cpuinit generic_processor_info(int apicid, int version)
1054{
1055 int cpu;
1056 cpumask_t tmp_map;
1057
1058 if (num_processors >= NR_CPUS) {
1059 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
1060 " Processor ignored.\n", NR_CPUS);
1061 return;
1062 }
1063
1064 num_processors++;
1065 cpus_complement(tmp_map, cpu_present_map);
1066 cpu = first_cpu(tmp_map);
1067
1068 physid_set(apicid, phys_cpu_present_map);
1069 if (apicid == boot_cpu_physical_apicid) {
1070 /*
1071 * x86_bios_cpu_apicid is required to have processors listed
1072 * in same order as logical cpu numbers. Hence the first
1073 * entry is BSP, and so on.
1074 */
1075 cpu = 0;
1076 }
1077 if (apicid > max_physical_apicid)
1078 max_physical_apicid = apicid;
1079
1080 /* are we being called early in kernel startup? */
1081 if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
1082 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
1083 u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
1084
1085 cpu_to_apicid[cpu] = apicid;
1086 bios_cpu_apicid[cpu] = apicid;
1087 } else {
1088 per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1089 per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1090 }
1091
1092 cpu_set(cpu, cpu_possible_map);
1093 cpu_set(cpu, cpu_present_map);
1094}
1095
1096/*
1097 * Power management
1098 */
1099#ifdef CONFIG_PM
1100
1101static struct {
1102 /* 'active' is true if the local APIC was enabled by us and
1103 not the BIOS; this signifies that we are also responsible
1104 for disabling it before entering apm/acpi suspend */
1105 int active;
1106 /* r/w apic fields */
1107 unsigned int apic_id;
1108 unsigned int apic_taskpri;
1109 unsigned int apic_ldr;
1110 unsigned int apic_dfr;
1111 unsigned int apic_spiv;
1112 unsigned int apic_lvtt;
1113 unsigned int apic_lvtpc;
1114 unsigned int apic_lvt0;
1115 unsigned int apic_lvt1;
1116 unsigned int apic_lvterr;
1117 unsigned int apic_tmict;
1118 unsigned int apic_tdcr;
1119 unsigned int apic_thmr;
1120} apic_pm_state;
1121
1122static int lapic_suspend(struct sys_device *dev, pm_message_t state)
1123{
1124 unsigned long flags;
1125 int maxlvt;
1126
1127 if (!apic_pm_state.active)
1128 return 0;
1129
1130 maxlvt = lapic_get_maxlvt();
1131
1132 apic_pm_state.apic_id = read_apic_id();
1133 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
1134 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
1135 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
1136 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
1137 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
1138 if (maxlvt >= 4)
1139 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
1140 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
1141 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
1142 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
1143 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
1144 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
1145#ifdef CONFIG_X86_MCE_INTEL
1146 if (maxlvt >= 5)
1147 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
1148#endif
1149 local_irq_save(flags);
1150 disable_local_APIC();
1151 local_irq_restore(flags);
1152 return 0;
1153}
1154
1155static int lapic_resume(struct sys_device *dev)
1156{
1157 unsigned int l, h;
1158 unsigned long flags;
1159 int maxlvt;
1160
1161 if (!apic_pm_state.active)
1162 return 0;
1163
1164 maxlvt = lapic_get_maxlvt();
1165
1166 local_irq_save(flags);
1167 rdmsr(MSR_IA32_APICBASE, l, h);
1168 l &= ~MSR_IA32_APICBASE_BASE;
1169 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
1170 wrmsr(MSR_IA32_APICBASE, l, h);
1171 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
1172 apic_write(APIC_ID, apic_pm_state.apic_id);
1173 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
1174 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
1175 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
1176 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
1177 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
1178 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
1179#ifdef CONFIG_X86_MCE_INTEL
1180 if (maxlvt >= 5)
1181 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
1182#endif
1183 if (maxlvt >= 4)
1184 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
1185 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
1186 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
1187 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
1188 apic_write(APIC_ESR, 0);
1189 apic_read(APIC_ESR);
1190 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
1191 apic_write(APIC_ESR, 0);
1192 apic_read(APIC_ESR);
1193 local_irq_restore(flags);
1194 return 0;
1195}
1196
1197static struct sysdev_class lapic_sysclass = {
1198 .name = "lapic",
1199 .resume = lapic_resume,
1200 .suspend = lapic_suspend,
1201};
1202
1203static struct sys_device device_lapic = {
1204 .id = 0,
1205 .cls = &lapic_sysclass,
1206};
1207
1208static void __cpuinit apic_pm_activate(void)
1209{
1210 apic_pm_state.active = 1;
1211}
1212
1213static int __init init_lapic_sysfs(void)
1214{
1215 int error;
1216
1217 if (!cpu_has_apic)
1218 return 0;
1219 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
1220
1221 error = sysdev_class_register(&lapic_sysclass);
1222 if (!error)
1223 error = sysdev_register(&device_lapic);
1224 return error;
1225}
1226device_initcall(init_lapic_sysfs);
1227
1228#else /* CONFIG_PM */
1229
1230static void apic_pm_activate(void) { }
1231
1232#endif /* CONFIG_PM */
1233
1234/*
1235 * apic_is_clustered_box() -- Check if we can expect good TSC
1236 *
1237 * Thus far, the major user of this is IBM's Summit2 series:
1238 *
1239 * Clustered boxes may have unsynced TSC problems if they are
1240 * multi-chassis. Use available data to take a good guess.
1241 * If in doubt, go HPET.
1242 */
1243__cpuinit int apic_is_clustered_box(void)
1244{
1245 int i, clusters, zeros;
1246 unsigned id;
1247 u16 *bios_cpu_apicid;
1248 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
1249
1250 /*
1251 * There are no boxes of this kind with AMD CPUs yet.
1252 * Some AMD boxes with quad-core CPUs and 8 sockets will have
1253 * APIC IDs of [4, 0x23] or [8, 0x27] and could be mistaken for
1254 * vSMP boxes, so this still needs checking...
1255 */
1256 if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
1257 return 0;
1258
1259 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
1260 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
1261
1262 for (i = 0; i < NR_CPUS; i++) {
1263 /* are we being called early in kernel startup? */
1264 if (bios_cpu_apicid) {
1265 id = bios_cpu_apicid[i];
1266 }
1267 else if (i < nr_cpu_ids) {
1268 if (cpu_present(i))
1269 id = per_cpu(x86_bios_cpu_apicid, i);
1270 else
1271 continue;
1272 }
1273 else
1274 break;
1275
1276 if (id != BAD_APICID)
1277 __set_bit(APIC_CLUSTERID(id), clustermap);
1278 }
1279
1280 /* Problem: Partially populated chassis may not have CPUs in some of
1281 * the APIC clusters they have been allocated. Only present CPUs have
1282 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
1283 * Since clusters are allocated sequentially, count zeros only if
1284 * they are bounded by ones.
1285 */
1286 clusters = 0;
1287 zeros = 0;
1288 for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
1289 if (test_bit(i, clustermap)) {
1290 clusters += 1 + zeros;
1291 zeros = 0;
1292 } else
1293 ++zeros;
1294 }
1295
1296 /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
1297 * not guaranteed to be synced between boards
1298 */
1299 if (is_vsmp_box() && clusters > 1)
1300 return 1;
1301
1302 /*
1303 * If clusters > 2, then it should be multi-chassis.
1304 * May have to revisit this when multi-core + hyperthreaded CPUs come
1305 * out, but AFAIK this will work even for them.
1306 */
1307 return (clusters > 2);
1308}
1309
1310/*
1311 * APIC command line parameters
1312 */
1313static int __init apic_set_verbosity(char *str)
1314{
1315 if (str == NULL) {
1316 skip_ioapic_setup = 0;
1317 ioapic_force = 1;
1318 return 0;
1319 }
1320 if (strcmp("debug", str) == 0)
1321 apic_verbosity = APIC_DEBUG;
1322 else if (strcmp("verbose", str) == 0)
1323 apic_verbosity = APIC_VERBOSE;
1324 else {
1325 printk(KERN_WARNING "APIC Verbosity level %s not recognised,"
1326 " use apic=verbose or apic=debug\n", str);
1327 return -EINVAL;
1328 }
1329
1330 return 0;
1331}
1332early_param("apic", apic_set_verbosity);
1333
1334static __init int setup_disableapic(char *str)
1335{
1336 disable_apic = 1;
1337 setup_clear_cpu_cap(X86_FEATURE_APIC);
1338 return 0;
1339}
1340early_param("disableapic", setup_disableapic);
1341
1342/* same as disableapic, for compatibility */
1343static __init int setup_nolapic(char *str)
1344{
1345 return setup_disableapic(str);
1346}
1347early_param("nolapic", setup_nolapic);
1348
1349static int __init parse_lapic_timer_c2_ok(char *arg)
1350{
1351 local_apic_timer_c2_ok = 1;
1352 return 0;
1353}
1354early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1355
1356static __init int setup_noapictimer(char *str)
1357{
1358 if (str[0] != ' ' && str[0] != 0)
1359 return 0;
1360 disable_apic_timer = 1;
1361 return 1;
1362}
1363__setup("noapictimer", setup_noapictimer);
1364
1365static __init int setup_apicpmtimer(char *s)
1366{
1367 apic_calibrate_pmtmr = 1;
1368 notsc_setup(NULL);
1369 return 0;
1370}
1371__setup("apicpmtimer", setup_apicpmtimer);
1372
1373static int __init lapic_insert_resource(void)
1374{
1375 if (!apic_phys)
1376 return -1;
1377
1378 /* Put local APIC into the resource map. */
1379 lapic_resource.start = apic_phys;
1380 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
1381 insert_resource(&iomem_resource, &lapic_resource);
1382
1383 return 0;
1384}
1385
1386/*
1387 * need to call insert_resource() after e820_reserve_resources(),
1388 * which uses request_resource()
1389 */
1390late_initcall(lapic_insert_resource);
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 732d1f4e10ee..5145a6e72bbb 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -228,7 +228,6 @@
228#include <linux/suspend.h> 228#include <linux/suspend.h>
229#include <linux/kthread.h> 229#include <linux/kthread.h>
230#include <linux/jiffies.h> 230#include <linux/jiffies.h>
231#include <linux/smp_lock.h>
232 231
233#include <asm/system.h> 232#include <asm/system.h>
234#include <asm/uaccess.h> 233#include <asm/uaccess.h>
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index aa89387006fe..505543a75a56 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -22,7 +22,7 @@
22 22
23#define __NO_STUBS 1 23#define __NO_STUBS 1
24#undef __SYSCALL 24#undef __SYSCALL
25#undef _ASM_X86_64_UNISTD_H_ 25#undef ASM_X86__UNISTD_64_H
26#define __SYSCALL(nr, sym) [nr] = 1, 26#define __SYSCALL(nr, sym) [nr] = 1,
27static char syscalls[] = { 27static char syscalls[] = {
28#include <asm/unistd.h> 28#include <asm/unistd.h>
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index c639bd55391c..f0dfe6f17e7e 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * BIOS run time interface routines. 2 * BIOS run time interface routines.
3 * 3 *
4 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
@@ -16,33 +14,128 @@
16 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
19 * Copyright (c) Russ Anderson
19 */ 20 */
20 21
22#include <linux/efi.h>
23#include <asm/efi.h>
24#include <linux/io.h>
21#include <asm/uv/bios.h> 25#include <asm/uv/bios.h>
26#include <asm/uv/uv_hub.h>
27
28struct uv_systab uv_systab;
22 29
23const char * 30s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
24x86_bios_strerror(long status)
25{ 31{
26 const char *str; 32 struct uv_systab *tab = &uv_systab;
27 switch (status) { 33
28 case 0: str = "Call completed without error"; break; 34 if (!tab->function)
29 case -1: str = "Not implemented"; break; 35 /*
30 case -2: str = "Invalid argument"; break; 36 * BIOS does not support UV systab
31 case -3: str = "Call completed with error"; break; 37 */
32 default: str = "Unknown BIOS status code"; break; 38 return BIOS_STATUS_UNIMPLEMENTED;
33 } 39
34 return str; 40 return efi_call6((void *)__va(tab->function),
41 (u64)which, a1, a2, a3, a4, a5);
35} 42}
36 43
37long 44s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
38x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second, 45 u64 a4, u64 a5)
39 unsigned long *drift_info)
40{ 46{
41 struct uv_bios_retval isrv; 47 unsigned long bios_flags;
48 s64 ret;
42 49
43 BIOS_CALL(isrv, BIOS_FREQ_BASE, which, 0, 0, 0, 0, 0, 0); 50 local_irq_save(bios_flags);
44 *ticks_per_second = isrv.v0; 51 ret = uv_bios_call(which, a1, a2, a3, a4, a5);
45 *drift_info = isrv.v1; 52 local_irq_restore(bios_flags);
46 return isrv.status; 53
54 return ret;
47} 55}
48EXPORT_SYMBOL_GPL(x86_bios_freq_base); 56
57s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
58 u64 a4, u64 a5)
59{
60 s64 ret;
61
62 preempt_disable();
63 ret = uv_bios_call(which, a1, a2, a3, a4, a5);
64 preempt_enable();
65
66 return ret;
67}
68
69
70long sn_partition_id;
71EXPORT_SYMBOL_GPL(sn_partition_id);
72long uv_coherency_id;
73EXPORT_SYMBOL_GPL(uv_coherency_id);
74long uv_region_size;
75EXPORT_SYMBOL_GPL(uv_region_size);
76int uv_type;
77
78
79s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
80 long *region)
81{
82 s64 ret;
83 u64 v0, v1;
84 union partition_info_u part;
85
86 ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
87 (u64)(&v0), (u64)(&v1), 0, 0);
88 if (ret != BIOS_STATUS_SUCCESS)
89 return ret;
90
91 part.val = v0;
92 if (uvtype)
93 *uvtype = part.hub_version;
94 if (partid)
95 *partid = part.partition_id;
96 if (coher)
97 *coher = part.coherence_id;
98 if (region)
99 *region = part.region_size;
100 return ret;
101}
102
103
104s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
105{
106 return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
107 (u64)ticks_per_second, 0, 0, 0);
108}
109EXPORT_SYMBOL_GPL(uv_bios_freq_base);
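A hedged usage sketch; BIOS_FREQ_BASE_REALTIME_CLOCK is assumed to be one of the clock-type constants from <asm/uv/bios.h> and is not defined in this file:

/* hypothetical caller asking the UV BIOS for the RTC base rate */
u64 ticks_per_sec;
s64 status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
			       &ticks_per_sec);
if (status != BIOS_STATUS_SUCCESS)
	ticks_per_sec = 0;	/* caller must fall back */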
110
111
112#ifdef CONFIG_EFI
113void uv_bios_init(void)
114{
115 struct uv_systab *tab;
116
117 if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
118 (efi.uv_systab == (unsigned long)NULL)) {
119 printk(KERN_CRIT "No EFI UV System Table.\n");
120 uv_systab.function = (unsigned long)NULL;
121 return;
122 }
123
124 tab = (struct uv_systab *)ioremap(efi.uv_systab,
125 sizeof(struct uv_systab));
126 if (strncmp(tab->signature, "UVST", 4) != 0)
127 printk(KERN_ERR "bad signature in UV system table!\n");
128
129 /*
130 * Copy table to permanent spot for later use.
131 */
132 memcpy(&uv_systab, tab, sizeof(struct uv_systab));
133 iounmap(tab);
134
135 printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
136}
137#else /* !CONFIG_EFI */
138
139void uv_bios_init(void) { }
140#endif
141
diff --git a/arch/x86/kernel/cpu/.gitignore b/arch/x86/kernel/cpu/.gitignore
new file mode 100644
index 000000000000..667df55a4399
--- /dev/null
+++ b/arch/x86/kernel/cpu/.gitignore
@@ -0,0 +1 @@
capflags.c
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ee76eaad3001..7f0b45a5d788 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -3,22 +3,30 @@
3# 3#
4 4
5obj-y := intel_cacheinfo.o addon_cpuid_features.o 5obj-y := intel_cacheinfo.o addon_cpuid_features.o
6obj-y += proc.o feature_names.o 6obj-y += proc.o capflags.o powerflags.o common.o
7 7
8obj-$(CONFIG_X86_32) += common.o bugs.o 8obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
9obj-$(CONFIG_X86_64) += common_64.o bugs_64.o 9obj-$(CONFIG_X86_64) += bugs_64.o
10obj-$(CONFIG_X86_32) += amd.o 10
11obj-$(CONFIG_X86_64) += amd_64.o 11obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
12obj-$(CONFIG_X86_32) += cyrix.o 12obj-$(CONFIG_CPU_SUP_AMD) += amd.o
13obj-$(CONFIG_X86_32) += centaur.o 13obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
14obj-$(CONFIG_X86_64) += centaur_64.o 14obj-$(CONFIG_CPU_SUP_CENTAUR_32) += centaur.o
15obj-$(CONFIG_X86_32) += transmeta.o 15obj-$(CONFIG_CPU_SUP_CENTAUR_64) += centaur_64.o
16obj-$(CONFIG_X86_32) += intel.o 16obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
17obj-$(CONFIG_X86_64) += intel_64.o 17obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
18obj-$(CONFIG_X86_32) += umc.o
19 18
20obj-$(CONFIG_X86_MCE) += mcheck/ 19obj-$(CONFIG_X86_MCE) += mcheck/
21obj-$(CONFIG_MTRR) += mtrr/ 20obj-$(CONFIG_MTRR) += mtrr/
22obj-$(CONFIG_CPU_FREQ) += cpufreq/ 21obj-$(CONFIG_CPU_FREQ) += cpufreq/
23 22
24obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o 23obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
24
25quiet_cmd_mkcapflags = MKCAP $@
26 cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@
27
28cpufeature = $(src)/../../../../include/asm-x86/cpufeature.h
29
30targets += capflags.c
31$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE
32 $(call if_changed,mkcapflags)
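For orientation, a hedged sketch of what the generated capflags.c plausibly looks like; the exact output of mkcapflags.pl is not reproduced here, so treat the names and layout as assumptions:

/* capflags.c -- generated from cpufeature.h, do not edit (sketch) */
#include <asm/cpufeature.h>

const char * const x86_cap_flags[NCAPINTS*32] = {
	[X86_FEATURE_FPU] = "fpu",
	[X86_FEATURE_VME] = "vme",
	/* ... one entry per annotated feature bit ... */
};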
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index a6ef672adbba..0d9c993aa93e 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,6 +7,8 @@
7#include <asm/pat.h> 7#include <asm/pat.h>
8#include <asm/processor.h> 8#include <asm/processor.h>
9 9
10#include <mach_apic.h>
11
10struct cpuid_bit { 12struct cpuid_bit {
11 u16 feature; 13 u16 feature;
12 u8 reg; 14 u8 reg;
@@ -48,6 +50,92 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
48 } 50 }
49} 51}
50 52
53/* leaf 0xb SMT level */
54#define SMT_LEVEL 0
55
56/* leaf 0xb sub-leaf types */
57#define INVALID_TYPE 0
58#define SMT_TYPE 1
59#define CORE_TYPE 2
60
61#define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff)
62#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
63#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
64
65/*
66 * Check for extended topology enumeration cpuid leaf 0xb and if it
67 * exists, use it for populating initial_apicid and cpu topology
68 * detection.
69 */
70void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
71{
72#ifdef CONFIG_SMP
73 unsigned int eax, ebx, ecx, edx, sub_index;
74 unsigned int ht_mask_width, core_plus_mask_width;
75 unsigned int core_select_mask, core_level_siblings;
76
77 if (c->cpuid_level < 0xb)
78 return;
79
80 cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
81
82 /*
83 * check if the cpuid leaf 0xb is actually implemented.
84 */
85 if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
86 return;
87
88 set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
89
90 /*
91 * initial apic id, which also represents 32-bit extended x2apic id.
92 */
93 c->initial_apicid = edx;
94
95 /*
96 * Populate HT related information from sub-leaf level 0.
97 */
98 core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
99 core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
100
101 sub_index = 1;
102 do {
103 cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
104
105 /*
106 * Check for the Core type in the implemented sub leaves.
107 */
108 if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
109 core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
110 core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
111 break;
112 }
113
114 sub_index++;
115 } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
116
117 core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
118
119#ifdef CONFIG_X86_32
120 c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
121 & core_select_mask;
122 c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
123#else
124 c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
125 c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
126#endif
127 c->x86_max_cores = (core_level_siblings / smp_num_siblings);
128
129
130 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
131 c->phys_proc_id);
132 if (c->x86_max_cores > 1)
133 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
134 c->cpu_core_id);
135 return;
136#endif
137}
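A worked example of the mask arithmetic above, with hypothetical widths (2 SMT threads per core, up to 4 cores per package):

/*
 * Assume ht_mask_width == 1 and core_plus_mask_width == 3, so:
 *
 *   core_select_mask = (~(-1 << 3)) >> 1 = 0b111 >> 1 = 0b11
 *
 * Then for a hypothetical initial APIC ID of 0b1011:
 *   SMT sibling  = apicid & 0b1          = 1
 *   cpu_core_id  = (apicid >> 1) & 0b11  = 1
 *   phys_proc_id = apicid >> 3           = 1
 */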
138
51#ifdef CONFIG_X86_PAT 139#ifdef CONFIG_X86_PAT
52void __cpuinit validate_pat_support(struct cpuinfo_x86 *c) 140void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
53{ 141{
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 18514ed26104..8f1e31db2ad5 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1,13 +1,22 @@
1#include <linux/init.h> 1#include <linux/init.h>
2#include <linux/bitops.h> 2#include <linux/bitops.h>
3#include <linux/mm.h> 3#include <linux/mm.h>
4
4#include <asm/io.h> 5#include <asm/io.h>
5#include <asm/processor.h> 6#include <asm/processor.h>
6#include <asm/apic.h> 7#include <asm/apic.h>
7 8
9#ifdef CONFIG_X86_64
10# include <asm/numa_64.h>
11# include <asm/mmconfig.h>
12# include <asm/cacheflush.h>
13#endif
14
8#include <mach_apic.h> 15#include <mach_apic.h>
16
9#include "cpu.h" 17#include "cpu.h"
10 18
19#ifdef CONFIG_X86_32
11/* 20/*
12 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause 21 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
13 * misexecution of code under Linux. Owners of such processors should 22 * misexecution of code under Linux. Owners of such processors should
@@ -24,26 +33,273 @@
24extern void vide(void); 33extern void vide(void);
25__asm__(".align 4\nvide: ret"); 34__asm__(".align 4\nvide: ret");
26 35
27static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) 36static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
28{ 37{
29 if (cpuid_eax(0x80000000) >= 0x80000007) { 38/*
30 c->x86_power = cpuid_edx(0x80000007); 39 * General Systems BIOSen alias the cpu frequency registers
31 if (c->x86_power & (1<<8)) 40 * of the Elan at 0x000df000. Unfortunately, one of the Linux
32 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 41 * drivers subsequently pokes it, and changes the CPU speed.
42 * Workaround: Remove the unneeded alias.
43 */
44#define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
45#define CBAR_ENB (0x80000000)
46#define CBAR_KEY (0X000000CB)
47 if (c->x86_model == 9 || c->x86_model == 10) {
48 if (inl (CBAR) & CBAR_ENB)
49 outl (0 | CBAR_KEY, CBAR);
33 } 50 }
34
35 /* Set MTRR capability flag if appropriate */
36 if (c->x86_model == 13 || c->x86_model == 9 ||
37 (c->x86_model == 8 && c->x86_mask >= 8))
38 set_cpu_cap(c, X86_FEATURE_K6_MTRR);
39} 51}
40 52
41static void __cpuinit init_amd(struct cpuinfo_x86 *c) 53
54static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
42{ 55{
43 u32 l, h; 56 u32 l, h;
44 int mbytes = num_physpages >> (20-PAGE_SHIFT); 57 int mbytes = num_physpages >> (20-PAGE_SHIFT);
45 int r;
46 58
59 if (c->x86_model < 6) {
60 /* Based on AMD doc 20734R - June 2000 */
61 if (c->x86_model == 0) {
62 clear_cpu_cap(c, X86_FEATURE_APIC);
63 set_cpu_cap(c, X86_FEATURE_PGE);
64 }
65 return;
66 }
67
68 if (c->x86_model == 6 && c->x86_mask == 1) {
69 const int K6_BUG_LOOP = 1000000;
70 int n;
71 void (*f_vide)(void);
72 unsigned long d, d2;
73
74 printk(KERN_INFO "AMD K6 stepping B detected - ");
75
76 /*
77 * It looks like AMD fixed the 2.6.2 bug and improved indirect
78 * calls at the same time.
79 */
80
81 n = K6_BUG_LOOP;
82 f_vide = vide;
83 rdtscl(d);
84 while (n--)
85 f_vide();
86 rdtscl(d2);
87 d = d2-d;
88
89 if (d > 20*K6_BUG_LOOP)
90 printk("system stability may be impaired when more than 32 MB are used.\n");
91 else
92 printk("probably OK (after B9730xxxx).\n");
93 printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
94 }
95
96 /* K6 with old style WHCR */
97 if (c->x86_model < 8 ||
98 (c->x86_model == 8 && c->x86_mask < 8)) {
99 /* We can only write allocate on the low 508Mb */
100 if (mbytes > 508)
101 mbytes = 508;
102
103 rdmsr(MSR_K6_WHCR, l, h);
104 if ((l&0x0000FFFF) == 0) {
105 unsigned long flags;
106 l = (1<<0)|((mbytes/4)<<1);
107 local_irq_save(flags);
108 wbinvd();
109 wrmsr(MSR_K6_WHCR, l, h);
110 local_irq_restore(flags);
111 printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
112 mbytes);
113 }
114 return;
115 }
116
117 if ((c->x86_model == 8 && c->x86_mask > 7) ||
118 c->x86_model == 9 || c->x86_model == 13) {
119 /* The more serious chips .. */
120
121 if (mbytes > 4092)
122 mbytes = 4092;
123
124 rdmsr(MSR_K6_WHCR, l, h);
125 if ((l&0xFFFF0000) == 0) {
126 unsigned long flags;
127 l = ((mbytes>>2)<<22)|(1<<16);
128 local_irq_save(flags);
129 wbinvd();
130 wrmsr(MSR_K6_WHCR, l, h);
131 local_irq_restore(flags);
132 printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
133 mbytes);
134 }
135
136 return;
137 }
138
139 if (c->x86_model == 10) {
140 /* AMD Geode LX is model 10 */
141 /* placeholder for any needed mods */
142 return;
143 }
144}
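A short sketch of the two WHCR encodings programmed above (the write-allocate limit is stored in 4 MB units, but moves fields and enable bits between old-style and new-style K6 parts); the memory size is hypothetical:

#include <stdio.h>

int main(void)
{
	int mbytes = 256;	/* hypothetical memory size in MB */

	/* old style: limit in 4 MB units from bit 1, bit 0 enables */
	printf("old WHCR: 0x%08lx\n",
	       (unsigned long)((1 << 0) | ((mbytes / 4) << 1)));
	/* new style: limit in 4 MB units from bit 22, bit 16 enables */
	printf("new WHCR: 0x%08lx\n",
	       (unsigned long)(((mbytes >> 2) << 22) | (1 << 16)));
	return 0;
}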
145
146static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
147{
148 u32 l, h;
149
150 /*
151 * Bit 15 of Athlon specific MSR 15, needs to be 0
152 * to enable SSE on Palomino/Morgan/Barton CPU's.
153 * If the BIOS didn't enable it already, enable it here.
154 */
155 if (c->x86_model >= 6 && c->x86_model <= 10) {
156 if (!cpu_has(c, X86_FEATURE_XMM)) {
157 printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
158 rdmsr(MSR_K7_HWCR, l, h);
159 l &= ~0x00008000;
160 wrmsr(MSR_K7_HWCR, l, h);
161 set_cpu_cap(c, X86_FEATURE_XMM);
162 }
163 }
164
165 /*
166 * It's been determined by AMD that Athlons since model 8 stepping 1
167 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
168 * As per AMD technical note 27212 0.2
169 */
170 if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
171 rdmsr(MSR_K7_CLK_CTL, l, h);
172 if ((l & 0xfff00000) != 0x20000000) {
173 printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
174 ((l & 0x000fffff)|0x20000000));
175 wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
176 }
177 }
178
179 set_cpu_cap(c, X86_FEATURE_K7);
180}
181#endif
182
183#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
184static int __cpuinit nearby_node(int apicid)
185{
186 int i, node;
187
188 for (i = apicid - 1; i >= 0; i--) {
189 node = apicid_to_node[i];
190 if (node != NUMA_NO_NODE && node_online(node))
191 return node;
192 }
193 for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
194 node = apicid_to_node[i];
195 if (node != NUMA_NO_NODE && node_online(node))
196 return node;
197 }
198 return first_node(node_online_map); /* Shouldn't happen */
199}
200#endif
201
202/*
203 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
204 * Assumes number of cores is a power of two.
205 */
206static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
207{
208#ifdef CONFIG_X86_HT
209 unsigned bits;
210
211 bits = c->x86_coreid_bits;
212
213 /* Low order bits define the core id (index of core in socket) */
214 c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
215 /* Convert the initial APIC ID into the socket ID */
216 c->phys_proc_id = c->initial_apicid >> bits;
217#endif
218}
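For illustration, the same split with hypothetical numbers (x86_coreid_bits = 2, i.e. up to four cores per socket):

#include <stdio.h>

int main(void)
{
	unsigned bits = 2;	/* hypothetical x86_coreid_bits */
	unsigned apicid = 0x0b;	/* hypothetical initial APIC ID */

	printf("core %u in socket %u\n",
	       apicid & ((1 << bits) - 1),	/* low bits: core 3 */
	       apicid >> bits);			/* high bits: socket 2 */
	return 0;
}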
219
220static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
221{
222#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
223 int cpu = smp_processor_id();
224 int node;
225 unsigned apicid = hard_smp_processor_id();
226
227 node = c->phys_proc_id;
228 if (apicid_to_node[apicid] != NUMA_NO_NODE)
229 node = apicid_to_node[apicid];
230 if (!node_online(node)) {
231 /* Two possibilities here:
232 - The CPU is missing memory and no node was created.
233 In that case try picking one from a nearby CPU
234 - The APIC IDs differ from the HyperTransport node IDs
235 which the K8 northbridge parsing fills in.
236 Assume they are all increased by a constant offset,
237 but in the same order as the HT nodeids.
238 If that doesn't result in a usable node fall back to the
239 path for the previous case. */
240
241 int ht_nodeid = c->initial_apicid;
242
243 if (ht_nodeid >= 0 &&
244 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
245 node = apicid_to_node[ht_nodeid];
246 /* Pick a nearby node */
247 if (!node_online(node))
248 node = nearby_node(apicid);
249 }
250 numa_set_node(cpu, node);
251
252 printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
253#endif
254}
255
256static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
257{
258#ifdef CONFIG_X86_HT
259 unsigned bits, ecx;
260
261 /* Multi core CPU? */
262 if (c->extended_cpuid_level < 0x80000008)
263 return;
264
265 ecx = cpuid_ecx(0x80000008);
266
267 c->x86_max_cores = (ecx & 0xff) + 1;
268
269 /* CPU telling us the core id bits shift? */
270 bits = (ecx >> 12) & 0xF;
271
272 /* Otherwise recompute */
273 if (bits == 0) {
274 while ((1 << bits) < c->x86_max_cores)
275 bits++;
276 }
277
278 c->x86_coreid_bits = bits;
279#endif
280}
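The fallback loop above just computes ceil(log2(x86_max_cores)); as a quick check with a hypothetical, non-power-of-two core count:

#include <stdio.h>

int main(void)
{
	unsigned max_cores = 6;	/* hypothetical, not a power of two */
	unsigned bits = 0;

	while ((1 << bits) < max_cores)
		bits++;
	printf("%u cores -> %u core id bits\n", max_cores, bits);	/* 3 */
	return 0;
}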
281
282static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
283{
284 early_init_amd_mc(c);
285
286 /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
287 if (c->x86_power & (1<<8))
288 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
289
290#ifdef CONFIG_X86_64
291 set_cpu_cap(c, X86_FEATURE_SYSCALL32);
292#else
293 /* Set MTRR capability flag if appropriate */
294 if (c->x86 == 5)
295 if (c->x86_model == 13 || c->x86_model == 9 ||
296 (c->x86_model == 8 && c->x86_mask >= 8))
297 set_cpu_cap(c, X86_FEATURE_K6_MTRR);
298#endif
299}
300
301static void __cpuinit init_amd(struct cpuinfo_x86 *c)
302{
47#ifdef CONFIG_SMP 303#ifdef CONFIG_SMP
48 unsigned long long value; 304 unsigned long long value;
49 305
@@ -54,7 +310,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
54 * Errata 63 for SH-B3 steppings 310 * Errata 63 for SH-B3 steppings
55 * Errata 122 for all steppings (F+ have it disabled by default) 311 * Errata 122 for all steppings (F+ have it disabled by default)
56 */ 312 */
57 if (c->x86 == 15) { 313 if (c->x86 == 0xf) {
58 rdmsrl(MSR_K7_HWCR, value); 314 rdmsrl(MSR_K7_HWCR, value);
59 value |= 1 << 6; 315 value |= 1 << 6;
60 wrmsrl(MSR_K7_HWCR, value); 316 wrmsrl(MSR_K7_HWCR, value);
@@ -64,209 +320,119 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
64 early_init_amd(c); 320 early_init_amd(c);
65 321
66 /* 322 /*
67 * FIXME: We should handle the K5 here. Set up the write
68 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
69 * no bus pipeline)
70 */
71
72 /*
73 * Bit 31 in normal CPUID used for nonstandard 3DNow ID; 323 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
74 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway 324 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
75 */ 325 */
76 clear_cpu_cap(c, 0*32+31); 326 clear_cpu_cap(c, 0*32+31);
77 327
78 r = get_model_name(c); 328#ifdef CONFIG_X86_64
329 /* On C+ stepping K8 rep microcode works well for copy/memset */
330 if (c->x86 == 0xf) {
331 u32 level;
79 332
80 switch (c->x86) { 333 level = cpuid_eax(1);
81 case 4: 334 if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
82 /* 335 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
83 * General Systems BIOSen alias the cpu frequency registers
84 * of the Elan at 0x000df000. Unfortunately, one of the Linux
85 * drivers subsequently pokes it, and changes the CPU speed.
86 * Workaround : Remove the unneeded alias.
87 */
88#define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
89#define CBAR_ENB (0x80000000)
90#define CBAR_KEY (0X000000CB)
91 if (c->x86_model == 9 || c->x86_model == 10) {
92 if (inl (CBAR) & CBAR_ENB)
93 outl (0 | CBAR_KEY, CBAR);
94 }
95 break;
96 case 5:
97 if (c->x86_model < 6) {
98 /* Based on AMD doc 20734R - June 2000 */
99 if (c->x86_model == 0) {
100 clear_cpu_cap(c, X86_FEATURE_APIC);
101 set_cpu_cap(c, X86_FEATURE_PGE);
102 }
103 break;
104 }
105
106 if (c->x86_model == 6 && c->x86_mask == 1) {
107 const int K6_BUG_LOOP = 1000000;
108 int n;
109 void (*f_vide)(void);
110 unsigned long d, d2;
111
112 printk(KERN_INFO "AMD K6 stepping B detected - ");
113
114 /*
115 * It looks like AMD fixed the 2.6.2 bug and improved indirect
116 * calls at the same time.
117 */
118
119 n = K6_BUG_LOOP;
120 f_vide = vide;
121 rdtscl(d);
122 while (n--)
123 f_vide();
124 rdtscl(d2);
125 d = d2-d;
126
127 if (d > 20*K6_BUG_LOOP)
128 printk("system stability may be impaired when more than 32 MB are used.\n");
129 else
130 printk("probably OK (after B9730xxxx).\n");
131 printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
132 }
133
134 /* K6 with old style WHCR */
135 if (c->x86_model < 8 ||
136 (c->x86_model == 8 && c->x86_mask < 8)) {
137 /* We can only write allocate on the low 508Mb */
138 if (mbytes > 508)
139 mbytes = 508;
140
141 rdmsr(MSR_K6_WHCR, l, h);
142 if ((l&0x0000FFFF) == 0) {
143 unsigned long flags;
144 l = (1<<0)|((mbytes/4)<<1);
145 local_irq_save(flags);
146 wbinvd();
147 wrmsr(MSR_K6_WHCR, l, h);
148 local_irq_restore(flags);
149 printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
150 mbytes);
151 }
152 break;
153 }
154
155 if ((c->x86_model == 8 && c->x86_mask > 7) ||
156 c->x86_model == 9 || c->x86_model == 13) {
157 /* The more serious chips .. */
158
159 if (mbytes > 4092)
160 mbytes = 4092;
161
162 rdmsr(MSR_K6_WHCR, l, h);
163 if ((l&0xFFFF0000) == 0) {
164 unsigned long flags;
165 l = ((mbytes>>2)<<22)|(1<<16);
166 local_irq_save(flags);
167 wbinvd();
168 wrmsr(MSR_K6_WHCR, l, h);
169 local_irq_restore(flags);
170 printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
171 mbytes);
172 }
173
174 break;
175 }
176
177 if (c->x86_model == 10) {
178 /* AMD Geode LX is model 10 */
179 /* placeholder for any needed mods */
180 break;
181 }
182 break;
183 case 6: /* An Athlon/Duron */
184
185 /*
186 * Bit 15 of Athlon specific MSR 15, needs to be 0
187 * to enable SSE on Palomino/Morgan/Barton CPU's.
188 * If the BIOS didn't enable it already, enable it here.
189 */
190 if (c->x86_model >= 6 && c->x86_model <= 10) {
191 if (!cpu_has(c, X86_FEATURE_XMM)) {
192 printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
193 rdmsr(MSR_K7_HWCR, l, h);
194 l &= ~0x00008000;
195 wrmsr(MSR_K7_HWCR, l, h);
196 set_cpu_cap(c, X86_FEATURE_XMM);
197 }
198 }
199
200 /*
201 * It's been determined by AMD that Athlons since model 8 stepping 1
202 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
203 * As per AMD technical note 27212 0.2
204 */
205 if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
206 rdmsr(MSR_K7_CLK_CTL, l, h);
207 if ((l & 0xfff00000) != 0x20000000) {
208 printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
209 ((l & 0x000fffff)|0x20000000));
210 wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
211 }
212 }
213 break;
214 } 336 }
337 if (c->x86 == 0x10 || c->x86 == 0x11)
338 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
339#else
340
341 /*
342 * FIXME: We should handle the K5 here. Set up the write
343 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
344 * no bus pipeline)
345 */
215 346
216 switch (c->x86) { 347 switch (c->x86) {
217 case 15: 348 case 4:
218 /* Use K8 tuning for Fam10h and Fam11h */ 349 init_amd_k5(c);
219 case 0x10:
220 case 0x11:
221 set_cpu_cap(c, X86_FEATURE_K8);
222 break; 350 break;
223 case 6: 351 case 5:
224 set_cpu_cap(c, X86_FEATURE_K7); 352 init_amd_k6(c);
353 break;
354 case 6: /* An Athlon/Duron */
355 init_amd_k7(c);
225 break; 356 break;
226 } 357 }
358
359	/* K6s report MCEs but don't actually have all the MSRs */
360 if (c->x86 < 6)
361 clear_cpu_cap(c, X86_FEATURE_MCE);
362#endif
363
364 /* Enable workaround for FXSAVE leak */
227 if (c->x86 >= 6) 365 if (c->x86 >= 6)
228 set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); 366 set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
229 367
230 display_cacheinfo(c); 368 if (!c->x86_model_id[0]) {
231 369 switch (c->x86) {
232 if (cpuid_eax(0x80000000) >= 0x80000008) 370 case 0xf:
233 c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; 371 /* Should distinguish Models here, but this is only
372		a fallback anyway. */
373 strcpy(c->x86_model_id, "Hammer");
374 break;
375 }
376 }
234 377
235#ifdef CONFIG_X86_HT 378 display_cacheinfo(c);
236 /*
237	 * On an AMD multi-core setup the lower bits of the APIC id
238 * distinguish the cores.
239 */
240 if (c->x86_max_cores > 1) {
241 int cpu = smp_processor_id();
242 unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
243 379
244 if (bits == 0) { 380 /* Multi core CPU? */
245 while ((1 << bits) < c->x86_max_cores) 381 if (c->extended_cpuid_level >= 0x80000008) {
246 bits++; 382 amd_detect_cmp(c);
247 } 383 srat_detect_node(c);
248 c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
249 c->phys_proc_id >>= bits;
250 printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
251 cpu, c->x86_max_cores, c->cpu_core_id);
252 } 384 }
385
386#ifdef CONFIG_X86_32
387 detect_ht(c);
253#endif 388#endif
254 389
255 if (cpuid_eax(0x80000000) >= 0x80000006) { 390 if (c->extended_cpuid_level >= 0x80000006) {
256 if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000)) 391 if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
257 num_cache_leaves = 4; 392 num_cache_leaves = 4;
258 else 393 else
259 num_cache_leaves = 3; 394 num_cache_leaves = 3;
260 } 395 }
261 396
262	/* K6s report MCEs but don't actually have all the MSRs */ 397 if (c->x86 >= 0xf && c->x86 <= 0x11)
263 if (c->x86 < 6) 398 set_cpu_cap(c, X86_FEATURE_K8);
264 clear_cpu_cap(c, X86_FEATURE_MCE);
265 399
266 if (cpu_has_xmm2) 400 if (cpu_has_xmm2) {
401 /* MFENCE stops RDTSC speculation */
267 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); 402 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
403 }
404
405#ifdef CONFIG_X86_64
406 if (c->x86 == 0x10) {
407 /* do this for boot cpu */
408 if (c == &boot_cpu_data)
409 check_enable_amd_mmconf_dmi();
410
411 fam10h_check_enable_mmcfg();
412 }
413
414 if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
415 unsigned long long tseg;
416
417 /*
418 * Split up direct mapping around the TSEG SMM area.
419 * Don't do it for gbpages because there seems very little
420 * benefit in doing so.
421 */
422 if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
423 printk(KERN_DEBUG "tseg: %010llx\n", tseg);
424 if ((tseg>>PMD_SHIFT) <
425 (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
426 ((tseg>>PMD_SHIFT) <
427 (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
428 (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
429 set_memory_4k((unsigned long)__va(tseg), 1);
430 }
431 }
432#endif
268} 433}
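The TSEG test above compares a byte address against pfn counts; a sketch of the unit conversion with hypothetical values shows that both sides end up counted in 2 MB PMD units:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21	/* 2 MB pages on x86-64 */

int main(void)
{
	unsigned long long tseg = 0xcff00000ULL;	/* hypothetical SMM base */
	unsigned long max_low_pfn_mapped = 0xd0000;	/* hypothetical */

	printf("tseg in PMD %llu, direct mapping covers %lu PMDs\n",
	       tseg >> PMD_SHIFT,
	       max_low_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT));
	return 0;
}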
269 434
435#ifdef CONFIG_X86_32
270static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) 436static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
271{ 437{
272 /* AMD errata T13 (order #21922) */ 438 /* AMD errata T13 (order #21922) */
@@ -279,10 +445,12 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
279 } 445 }
280 return size; 446 return size;
281} 447}
448#endif
282 449
283static struct cpu_dev amd_cpu_dev __cpuinitdata = { 450static struct cpu_dev amd_cpu_dev __cpuinitdata = {
284 .c_vendor = "AMD", 451 .c_vendor = "AMD",
285 .c_ident = { "AuthenticAMD" }, 452 .c_ident = { "AuthenticAMD" },
453#ifdef CONFIG_X86_32
286 .c_models = { 454 .c_models = {
287 { .vendor = X86_VENDOR_AMD, .family = 4, .model_names = 455 { .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
288 { 456 {
@@ -295,9 +463,11 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
295 } 463 }
296 }, 464 },
297 }, 465 },
466 .c_size_cache = amd_size_cache,
467#endif
298 .c_early_init = early_init_amd, 468 .c_early_init = early_init_amd,
299 .c_init = init_amd, 469 .c_init = init_amd,
300 .c_size_cache = amd_size_cache, 470 .c_x86_vendor = X86_VENDOR_AMD,
301}; 471};
302 472
303cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev); 473cpu_dev_register(amd_cpu_dev);
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c
deleted file mode 100644
index d1692b2a41ff..000000000000
--- a/arch/x86/kernel/cpu/amd_64.c
+++ /dev/null
@@ -1,224 +0,0 @@
1#include <linux/init.h>
2#include <linux/mm.h>
3
4#include <asm/numa_64.h>
5#include <asm/mmconfig.h>
6#include <asm/cacheflush.h>
7
8#include <mach_apic.h>
9
10#include "cpu.h"
11
12int force_mwait __cpuinitdata;
13
14#ifdef CONFIG_NUMA
15static int __cpuinit nearby_node(int apicid)
16{
17 int i, node;
18
19 for (i = apicid - 1; i >= 0; i--) {
20 node = apicid_to_node[i];
21 if (node != NUMA_NO_NODE && node_online(node))
22 return node;
23 }
24 for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
25 node = apicid_to_node[i];
26 if (node != NUMA_NO_NODE && node_online(node))
27 return node;
28 }
29 return first_node(node_online_map); /* Shouldn't happen */
30}
31#endif
32
33/*
34 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
35 * Assumes number of cores is a power of two.
36 */
37static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
38{
39#ifdef CONFIG_SMP
40 unsigned bits;
41#ifdef CONFIG_NUMA
42 int cpu = smp_processor_id();
43 int node = 0;
44 unsigned apicid = hard_smp_processor_id();
45#endif
46 bits = c->x86_coreid_bits;
47
48 /* Low order bits define the core id (index of core in socket) */
49 c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
50 /* Convert the initial APIC ID into the socket ID */
51 c->phys_proc_id = c->initial_apicid >> bits;
52
53#ifdef CONFIG_NUMA
54 node = c->phys_proc_id;
55 if (apicid_to_node[apicid] != NUMA_NO_NODE)
56 node = apicid_to_node[apicid];
57 if (!node_online(node)) {
58 /* Two possibilities here:
59 - The CPU is missing memory and no node was created.
60 In that case try picking one from a nearby CPU
61 - The APIC IDs differ from the HyperTransport node IDs
62 which the K8 northbridge parsing fills in.
63 Assume they are all increased by a constant offset,
64 but in the same order as the HT nodeids.
65 If that doesn't result in a usable node fall back to the
66 path for the previous case. */
67
68 int ht_nodeid = c->initial_apicid;
69
70 if (ht_nodeid >= 0 &&
71 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
72 node = apicid_to_node[ht_nodeid];
73 /* Pick a nearby node */
74 if (!node_online(node))
75 node = nearby_node(apicid);
76 }
77 numa_set_node(cpu, node);
78
79 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
80#endif
81#endif
82}
83
84static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
85{
86#ifdef CONFIG_SMP
87 unsigned bits, ecx;
88
89 /* Multi core CPU? */
90 if (c->extended_cpuid_level < 0x80000008)
91 return;
92
93 ecx = cpuid_ecx(0x80000008);
94
95 c->x86_max_cores = (ecx & 0xff) + 1;
96
97 /* CPU telling us the core id bits shift? */
98 bits = (ecx >> 12) & 0xF;
99
100 /* Otherwise recompute */
101 if (bits == 0) {
102 while ((1 << bits) < c->x86_max_cores)
103 bits++;
104 }
105
106 c->x86_coreid_bits = bits;
107
108#endif
109}
110
111static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
112{
113 early_init_amd_mc(c);
114
115 /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
116 if (c->x86_power & (1<<8))
117 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
118
119 set_cpu_cap(c, X86_FEATURE_SYSCALL32);
120}
121
122static void __cpuinit init_amd(struct cpuinfo_x86 *c)
123{
124 unsigned level;
125
126#ifdef CONFIG_SMP
127 unsigned long value;
128
129 /*
130 * Disable TLB flush filter by setting HWCR.FFDIS on K8
131 * bit 6 of msr C001_0015
132 *
133 * Errata 63 for SH-B3 steppings
134 * Errata 122 for all steppings (F+ have it disabled by default)
135 */
136 if (c->x86 == 0xf) {
137 rdmsrl(MSR_K8_HWCR, value);
138 value |= 1 << 6;
139 wrmsrl(MSR_K8_HWCR, value);
140 }
141#endif
142
143 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
144 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
145 clear_cpu_cap(c, 0*32+31);
146
147 /* On C+ stepping K8 rep microcode works well for copy/memset */
148 if (c->x86 == 0xf) {
149 level = cpuid_eax(1);
150 if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
151 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
152 }
153 if (c->x86 == 0x10 || c->x86 == 0x11)
154 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
155
156 /* Enable workaround for FXSAVE leak */
157 if (c->x86 >= 6)
158 set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
159
160 level = get_model_name(c);
161 if (!level) {
162 switch (c->x86) {
163 case 0xf:
164 /* Should distinguish Models here, but this is only
165		a fallback anyway. */
166 strcpy(c->x86_model_id, "Hammer");
167 break;
168 }
169 }
170 display_cacheinfo(c);
171
172 /* Multi core CPU? */
173 if (c->extended_cpuid_level >= 0x80000008)
174 amd_detect_cmp(c);
175
176 if (c->extended_cpuid_level >= 0x80000006 &&
177 (cpuid_edx(0x80000006) & 0xf000))
178 num_cache_leaves = 4;
179 else
180 num_cache_leaves = 3;
181
182 if (c->x86 >= 0xf && c->x86 <= 0x11)
183 set_cpu_cap(c, X86_FEATURE_K8);
184
185 /* MFENCE stops RDTSC speculation */
186 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
187
188 if (c->x86 == 0x10) {
189 /* do this for boot cpu */
190 if (c == &boot_cpu_data)
191 check_enable_amd_mmconf_dmi();
192
193 fam10h_check_enable_mmcfg();
194 }
195
196 if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
197 unsigned long long tseg;
198
199 /*
200 * Split up direct mapping around the TSEG SMM area.
201 * Don't do it for gbpages because there seems very little
202 * benefit in doing so.
203 */
204 if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
205 printk(KERN_DEBUG "tseg: %010llx\n", tseg);
206 if ((tseg>>PMD_SHIFT) <
207 (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
208 ((tseg>>PMD_SHIFT) <
209 (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
210 (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
211 set_memory_4k((unsigned long)__va(tseg), 1);
212 }
213 }
214}
215
216static struct cpu_dev amd_cpu_dev __cpuinitdata = {
217 .c_vendor = "AMD",
218 .c_ident = { "AuthenticAMD" },
219 .c_early_init = early_init_amd,
220 .c_init = init_amd,
221};
222
223cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
224
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index a0534c04d38a..89bfdd9cacc6 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -289,7 +289,6 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
289 if (c->x86_model >= 6 && c->x86_model < 9) 289 if (c->x86_model >= 6 && c->x86_model < 9)
290 set_cpu_cap(c, X86_FEATURE_3DNOW); 290 set_cpu_cap(c, X86_FEATURE_3DNOW);
291 291
292 get_model_name(c);
293 display_cacheinfo(c); 292 display_cacheinfo(c);
294} 293}
295 294
@@ -475,6 +474,7 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
475 .c_early_init = early_init_centaur, 474 .c_early_init = early_init_centaur,
476 .c_init = init_centaur, 475 .c_init = init_centaur,
477 .c_size_cache = centaur_size_cache, 476 .c_size_cache = centaur_size_cache,
477 .c_x86_vendor = X86_VENDOR_CENTAUR,
478}; 478};
479 479
480cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev); 480cpu_dev_register(centaur_cpu_dev);
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
index 1d181c40e2e1..a1625f5a1e78 100644
--- a/arch/x86/kernel/cpu/centaur_64.c
+++ b/arch/x86/kernel/cpu/centaur_64.c
@@ -16,9 +16,10 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
16 16
17static void __cpuinit init_centaur(struct cpuinfo_x86 *c) 17static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
18{ 18{
19 early_init_centaur(c);
20
19 if (c->x86 == 0x6 && c->x86_model >= 0xf) { 21 if (c->x86 == 0x6 && c->x86_model >= 0xf) {
20 c->x86_cache_alignment = c->x86_clflush_size * 2; 22 c->x86_cache_alignment = c->x86_clflush_size * 2;
21 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
22 set_cpu_cap(c, X86_FEATURE_REP_GOOD); 23 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
23 } 24 }
24 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); 25 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
@@ -29,7 +30,8 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
29 .c_ident = { "CentaurHauls" }, 30 .c_ident = { "CentaurHauls" },
30 .c_early_init = early_init_centaur, 31 .c_early_init = early_init_centaur,
31 .c_init = init_centaur, 32 .c_init = init_centaur,
33 .c_x86_vendor = X86_VENDOR_CENTAUR,
32}; 34};
33 35
34cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev); 36cpu_dev_register(centaur_cpu_dev);
35 37
diff --git a/arch/x86/kernel/cpu/cmpxchg.c b/arch/x86/kernel/cpu/cmpxchg.c
new file mode 100644
index 000000000000..2056ccf572cc
--- /dev/null
+++ b/arch/x86/kernel/cpu/cmpxchg.c
@@ -0,0 +1,72 @@
1/*
2 * cmpxchg*() fallbacks for CPU not supporting these instructions
3 */
4
5#include <linux/kernel.h>
6#include <linux/smp.h>
7#include <linux/module.h>
8
9#ifndef CONFIG_X86_CMPXCHG
10unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
11{
12 u8 prev;
13 unsigned long flags;
14
15 /* Poor man's cmpxchg for 386. Unsuitable for SMP */
16 local_irq_save(flags);
17 prev = *(u8 *)ptr;
18 if (prev == old)
19 *(u8 *)ptr = new;
20 local_irq_restore(flags);
21 return prev;
22}
23EXPORT_SYMBOL(cmpxchg_386_u8);
24
25unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
26{
27 u16 prev;
28 unsigned long flags;
29
30 /* Poor man's cmpxchg for 386. Unsuitable for SMP */
31 local_irq_save(flags);
32 prev = *(u16 *)ptr;
33 if (prev == old)
34 *(u16 *)ptr = new;
35 local_irq_restore(flags);
36 return prev;
37}
38EXPORT_SYMBOL(cmpxchg_386_u16);
39
40unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
41{
42 u32 prev;
43 unsigned long flags;
44
45 /* Poor man's cmpxchg for 386. Unsuitable for SMP */
46 local_irq_save(flags);
47 prev = *(u32 *)ptr;
48 if (prev == old)
49 *(u32 *)ptr = new;
50 local_irq_restore(flags);
51 return prev;
52}
53EXPORT_SYMBOL(cmpxchg_386_u32);
54#endif
55
56#ifndef CONFIG_X86_CMPXCHG64
57unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
58{
59 u64 prev;
60 unsigned long flags;
61
62 /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
63 local_irq_save(flags);
64 prev = *(u64 *)ptr;
65 if (prev == old)
66 *(u64 *)ptr = new;
67 local_irq_restore(flags);
68 return prev;
69}
70EXPORT_SYMBOL(cmpxchg_486_u64);
71#endif
72
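For context, a userspace sketch of the compare-and-swap contract these fallbacks implement (no IRQ masking here, so unlike the code above it is not even UP-safe; illustration only, names are made up):

#include <stdio.h>

typedef unsigned int u32;

static u32 my_cmpxchg_u32(volatile void *ptr, u32 old, u32 new)
{
	u32 prev = *(u32 *)ptr;

	if (prev == old)		/* only store if nobody raced us */
		*(u32 *)ptr = new;
	return prev;			/* caller retries if prev != old */
}

int main(void)
{
	u32 lock = 0;

	if (my_cmpxchg_u32(&lock, 0, 1) == 0)
		printf("lock acquired\n");
	return 0;
}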
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4e456bd955bb..25581dcb280e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,28 +1,62 @@
1#include <linux/init.h> 1#include <linux/init.h>
2#include <linux/kernel.h>
3#include <linux/sched.h>
2#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/bootmem.h>
6#include <linux/bitops.h>
7#include <linux/module.h>
8#include <linux/kgdb.h>
9#include <linux/topology.h>
3#include <linux/delay.h> 10#include <linux/delay.h>
4#include <linux/smp.h> 11#include <linux/smp.h>
5#include <linux/module.h>
6#include <linux/percpu.h> 12#include <linux/percpu.h>
7#include <linux/bootmem.h>
8#include <asm/processor.h>
9#include <asm/i387.h> 13#include <asm/i387.h>
10#include <asm/msr.h> 14#include <asm/msr.h>
11#include <asm/io.h> 15#include <asm/io.h>
16#include <asm/linkage.h>
12#include <asm/mmu_context.h> 17#include <asm/mmu_context.h>
13#include <asm/mtrr.h> 18#include <asm/mtrr.h>
14#include <asm/mce.h> 19#include <asm/mce.h>
15#include <asm/pat.h> 20#include <asm/pat.h>
16#include <asm/asm.h> 21#include <asm/asm.h>
22#include <asm/numa.h>
17#ifdef CONFIG_X86_LOCAL_APIC 23#ifdef CONFIG_X86_LOCAL_APIC
18#include <asm/mpspec.h> 24#include <asm/mpspec.h>
19#include <asm/apic.h> 25#include <asm/apic.h>
20#include <mach_apic.h> 26#include <mach_apic.h>
27#include <asm/genapic.h>
21#endif 28#endif
22 29
30#include <asm/pda.h>
31#include <asm/pgtable.h>
32#include <asm/processor.h>
33#include <asm/desc.h>
34#include <asm/atomic.h>
35#include <asm/proto.h>
36#include <asm/sections.h>
37#include <asm/setup.h>
38
23#include "cpu.h" 39#include "cpu.h"
24 40
41static struct cpu_dev *this_cpu __cpuinitdata;
42
43#ifdef CONFIG_X86_64
44/* We need valid kernel segments for data and code in long mode too
45 * IRET will check the segment types kkeil 2000/10/28
46 * Also sysret mandates a special GDT layout
47 */
48/* The TLS descriptors are currently at a different place compared to i386.
49 Hopefully nobody expects them at a fixed place (Wine?) */
25DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { 50DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
51 [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
52 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
53 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
54 [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
55 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
56 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
57} };
58#else
59DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
26 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, 60 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
27 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, 61 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
28 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, 62 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
@@ -56,17 +90,157 @@ DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
56 [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, 90 [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
57 [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, 91 [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
58} }; 92} };
93#endif
59EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); 94EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
60 95
61__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; 96#ifdef CONFIG_X86_32
62
63static int cachesize_override __cpuinitdata = -1; 97static int cachesize_override __cpuinitdata = -1;
64static int disable_x86_serial_nr __cpuinitdata = 1; 98static int disable_x86_serial_nr __cpuinitdata = 1;
65 99
66struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; 100static int __init cachesize_setup(char *str)
101{
102 get_option(&str, &cachesize_override);
103 return 1;
104}
105__setup("cachesize=", cachesize_setup);
106
107static int __init x86_fxsr_setup(char *s)
108{
109 setup_clear_cpu_cap(X86_FEATURE_FXSR);
110 setup_clear_cpu_cap(X86_FEATURE_XMM);
111 return 1;
112}
113__setup("nofxsr", x86_fxsr_setup);
114
115static int __init x86_sep_setup(char *s)
116{
117 setup_clear_cpu_cap(X86_FEATURE_SEP);
118 return 1;
119}
120__setup("nosep", x86_sep_setup);
121
122/* Standard macro to see if a specific flag is changeable */
123static inline int flag_is_changeable_p(u32 flag)
124{
125 u32 f1, f2;
126
127 /*
128 * Cyrix and IDT cpus allow disabling of CPUID
129 * so the code below may return different results
130 * when it is executed before and after enabling
131 * the CPUID. Add "volatile" to not allow gcc to
132 * optimize the subsequent calls to this function.
133 */
134 asm volatile ("pushfl\n\t"
135 "pushfl\n\t"
136 "popl %0\n\t"
137 "movl %0,%1\n\t"
138 "xorl %2,%0\n\t"
139 "pushl %0\n\t"
140 "popfl\n\t"
141 "pushfl\n\t"
142 "popl %0\n\t"
143 "popfl\n\t"
144 : "=&r" (f1), "=&r" (f2)
145 : "ir" (flag));
146
147 return ((f1^f2) & flag) != 0;
148}
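The same EFLAGS-toggle probe works as a standalone 32-bit program (gcc -m32; hypothetical wrapper, not part of the patch; on any CPU new enough to run it, the ID flag is changeable and it prints 1, i.e. CPUID exists):

#include <stdio.h>

#define X86_EFLAGS_ID 0x00200000

static int flag_is_changeable(unsigned int flag)
{
	unsigned int f1, f2;

	asm volatile ("pushfl\n\t"
		      "pushfl\n\t"
		      "popl %0\n\t"
		      "movl %0,%1\n\t"
		      "xorl %2,%0\n\t"
		      "pushl %0\n\t"
		      "popfl\n\t"
		      "pushfl\n\t"
		      "popl %0\n\t"
		      "popfl\n\t"
		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));
	return ((f1 ^ f2) & flag) != 0;
}

int main(void)
{
	printf("%d\n", flag_is_changeable(X86_EFLAGS_ID));
	return 0;
}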
149
150/* Probe for the CPUID instruction */
151static int __cpuinit have_cpuid_p(void)
152{
153 return flag_is_changeable_p(X86_EFLAGS_ID);
154}
155
156static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
157{
158 if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
159 /* Disable processor serial number */
160 unsigned long lo, hi;
161 rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
162 lo |= 0x200000;
163 wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
164 printk(KERN_NOTICE "CPU serial number disabled.\n");
165 clear_cpu_cap(c, X86_FEATURE_PN);
166
167 /* Disabling the serial number may affect the cpuid level */
168 c->cpuid_level = cpuid_eax(0);
169 }
170}
171
172static int __init x86_serial_nr_setup(char *s)
173{
174 disable_x86_serial_nr = 0;
175 return 1;
176}
177__setup("serialnumber", x86_serial_nr_setup);
178#else
179static inline int flag_is_changeable_p(u32 flag)
180{
181 return 1;
182}
183/* Probe for the CPUID instruction */
184static inline int have_cpuid_p(void)
185{
186 return 1;
187}
188static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
189{
190}
191#endif
192
193/*
194 * Naming convention should be: <Name> [(<Codename>)]
195 * This table is only used if init_<vendor>() below doesn't set it;
196 * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
197 *
198 */
199
200/* Look up CPU names by table lookup. */
201static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
202{
203 struct cpu_model_info *info;
204
205 if (c->x86_model >= 16)
206 return NULL; /* Range check */
207
208 if (!this_cpu)
209 return NULL;
210
211 info = this_cpu->c_models;
212
213 while (info && info->family) {
214 if (info->family == c->x86)
215 return info->model_names[c->x86_model];
216 info++;
217 }
218 return NULL; /* Not found */
219}
220
221__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
222
223/* Current gdt points %fs at the "master" per-cpu area: after this,
224 * it's on the real one. */
225void switch_to_new_gdt(void)
226{
227 struct desc_ptr gdt_descr;
228
229 gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
230 gdt_descr.size = GDT_SIZE - 1;
231 load_gdt(&gdt_descr);
232#ifdef CONFIG_X86_32
233 asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
234#endif
235}
236
237static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
67 238
68static void __cpuinit default_init(struct cpuinfo_x86 *c) 239static void __cpuinit default_init(struct cpuinfo_x86 *c)
69{ 240{
241#ifdef CONFIG_X86_64
242 display_cacheinfo(c);
243#else
70 /* Not much we can do here... */ 244 /* Not much we can do here... */
71 /* Check if at least it has cpuid */ 245 /* Check if at least it has cpuid */
72 if (c->cpuid_level == -1) { 246 if (c->cpuid_level == -1) {
@@ -76,28 +250,22 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
76 else if (c->x86 == 3) 250 else if (c->x86 == 3)
77 strcpy(c->x86_model_id, "386"); 251 strcpy(c->x86_model_id, "386");
78 } 252 }
253#endif
79} 254}
80 255
81static struct cpu_dev __cpuinitdata default_cpu = { 256static struct cpu_dev __cpuinitdata default_cpu = {
82 .c_init = default_init, 257 .c_init = default_init,
83 .c_vendor = "Unknown", 258 .c_vendor = "Unknown",
259 .c_x86_vendor = X86_VENDOR_UNKNOWN,
84}; 260};
85static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
86
87static int __init cachesize_setup(char *str)
88{
89 get_option(&str, &cachesize_override);
90 return 1;
91}
92__setup("cachesize=", cachesize_setup);
93 261
94int __cpuinit get_model_name(struct cpuinfo_x86 *c) 262static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
95{ 263{
96 unsigned int *v; 264 unsigned int *v;
97 char *p, *q; 265 char *p, *q;
98 266
99 if (cpuid_eax(0x80000000) < 0x80000004) 267 if (c->extended_cpuid_level < 0x80000004)
100 return 0; 268 return;
101 269
102 v = (unsigned int *) c->x86_model_id; 270 v = (unsigned int *) c->x86_model_id;
103 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); 271 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
@@ -116,30 +284,34 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
116 while (q <= &c->x86_model_id[48]) 284 while (q <= &c->x86_model_id[48])
117 *q++ = '\0'; /* Zero-pad the rest */ 285 *q++ = '\0'; /* Zero-pad the rest */
118 } 286 }
119
120 return 1;
121} 287}
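A userspace analogue of get_model_name() above, pulling the 48-byte brand string from leaves 0x80000002..4 (a sketch assuming GCC's <cpuid.h> helpers):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int v[13] = { 0 };	/* 48 bytes + NUL terminator */
	unsigned int i;

	if (__get_cpuid_max(0x80000000, 0) < 0x80000004)
		return 1;		/* brand string not supported */
	for (i = 0; i < 3; i++)
		__get_cpuid(0x80000002 + i, &v[i * 4], &v[i * 4 + 1],
			    &v[i * 4 + 2], &v[i * 4 + 3]);
	printf("%s\n", (char *)v);
	return 0;
}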
122 288
123
124void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) 289void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
125{ 290{
126 unsigned int n, dummy, ecx, edx, l2size; 291 unsigned int n, dummy, ebx, ecx, edx, l2size;
127 292
128 n = cpuid_eax(0x80000000); 293 n = c->extended_cpuid_level;
129 294
130 if (n >= 0x80000005) { 295 if (n >= 0x80000005) {
131 cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); 296 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
132 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", 297 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
133 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); 298 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
134 c->x86_cache_size = (ecx>>24)+(edx>>24); 299 c->x86_cache_size = (ecx>>24) + (edx>>24);
300#ifdef CONFIG_X86_64
301 /* On K8 L1 TLB is inclusive, so don't count it */
302 c->x86_tlbsize = 0;
303#endif
135 } 304 }
136 305
137	if (n < 0x80000006) /* Some chips just have a large L1. */ 306 return;
138 return; 307 return;
139 308
140 ecx = cpuid_ecx(0x80000006); 309 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
141 l2size = ecx >> 16; 310 l2size = ecx >> 16;
142 311
312#ifdef CONFIG_X86_64
313 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
314#else
143 /* do processor-specific cache resizing */ 315 /* do processor-specific cache resizing */
144 if (this_cpu->c_size_cache) 316 if (this_cpu->c_size_cache)
145 l2size = this_cpu->c_size_cache(c, l2size); 317 l2size = this_cpu->c_size_cache(c, l2size);
@@ -150,116 +322,106 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
150 322
151 if (l2size == 0) 323 if (l2size == 0)
152 return; /* Again, no L2 cache is possible */ 324 return; /* Again, no L2 cache is possible */
325#endif
153 326
154 c->x86_cache_size = l2size; 327 c->x86_cache_size = l2size;
155 328
156 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", 329 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
157 l2size, ecx & 0xFF); 330 l2size, ecx & 0xFF);
158} 331}
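A worked decode of the 0x80000005/6 register layout used above, with hypothetical register values (size in the top byte or word, line size in the low byte):

#include <stdio.h>

int main(void)
{
	unsigned int edx = 0x40020140;	/* hypothetical 0x80000005 EDX (L1 I) */
	unsigned int ecx = 0x02006140;	/* hypothetical 0x80000006 ECX (L2) */

	printf("L1 I: %uK, %u bytes/line\n", edx >> 24, edx & 0xff);
	printf("L2:   %uK, %u bytes/line\n", ecx >> 16, ecx & 0xff);
	return 0;
}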
159 332
160/* 333void __cpuinit detect_ht(struct cpuinfo_x86 *c)
161 * Naming convention should be: <Name> [(<Codename>)]
162 * This table is only used if init_<vendor>() below doesn't set it;
163 * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
164 *
165 */
166
167/* Look up CPU names by table lookup. */
168static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
169{ 334{
170 struct cpu_model_info *info; 335#ifdef CONFIG_X86_HT
336 u32 eax, ebx, ecx, edx;
337 int index_msb, core_bits;
171 338
172 if (c->x86_model >= 16) 339 if (!cpu_has(c, X86_FEATURE_HT))
173 return NULL; /* Range check */ 340 return;
174 341
175 if (!this_cpu) 342 if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
176 return NULL; 343 goto out;
177 344
178 info = this_cpu->c_models; 345 if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
346 return;
179 347
180 while (info && info->family) { 348 cpuid(1, &eax, &ebx, &ecx, &edx);
181 if (info->family == c->x86) 349
182 return info->model_names[c->x86_model]; 350 smp_num_siblings = (ebx & 0xff0000) >> 16;
183 info++; 351
352 if (smp_num_siblings == 1) {
353 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
354 } else if (smp_num_siblings > 1) {
355
356 if (smp_num_siblings > NR_CPUS) {
357 printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
358 smp_num_siblings);
359 smp_num_siblings = 1;
360 return;
361 }
362
363 index_msb = get_count_order(smp_num_siblings);
364#ifdef CONFIG_X86_64
365 c->phys_proc_id = phys_pkg_id(index_msb);
366#else
367 c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
368#endif
369
370 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
371
372 index_msb = get_count_order(smp_num_siblings);
373
374 core_bits = get_count_order(c->x86_max_cores);
375
376#ifdef CONFIG_X86_64
377 c->cpu_core_id = phys_pkg_id(index_msb) &
378 ((1 << core_bits) - 1);
379#else
380 c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
381 ((1 << core_bits) - 1);
382#endif
184 } 383 }
185 return NULL; /* Not found */
186}
187 384
385out:
386 if ((c->x86_max_cores * smp_num_siblings) > 1) {
387 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
388 c->phys_proc_id);
389 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
390 c->cpu_core_id);
391 }
392#endif
393}
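The sibling arithmetic above reduces to ceil(log2()); a sketch with hypothetical counts (4 logical CPUs reported in CPUID(1).EBX, 2 cores):

#include <stdio.h>

static int count_order(unsigned int x)	/* ceil(log2(x)) */
{
	int order = 0;

	while ((1u << order) < x)
		order++;
	return order;
}

int main(void)
{
	unsigned int siblings = 4, max_cores = 2;	/* hypothetical */
	int pkg_shift = count_order(siblings);		/* 2 */

	siblings /= max_cores;				/* threads per core */
	printf("package shift %d, thread shift %d, core bits %d\n",
	       pkg_shift, count_order(siblings), count_order(max_cores));
	return 0;
}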
188 394
189static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) 395static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
190{ 396{
191 char *v = c->x86_vendor_id; 397 char *v = c->x86_vendor_id;
192 int i; 398 int i;
193 static int printed; 399 static int printed;
194 400
195 for (i = 0; i < X86_VENDOR_NUM; i++) { 401 for (i = 0; i < X86_VENDOR_NUM; i++) {
196 if (cpu_devs[i]) { 402 if (!cpu_devs[i])
197 if (!strcmp(v, cpu_devs[i]->c_ident[0]) || 403 break;
198 (cpu_devs[i]->c_ident[1] && 404
199 !strcmp(v, cpu_devs[i]->c_ident[1]))) { 405 if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
200 c->x86_vendor = i; 406 (cpu_devs[i]->c_ident[1] &&
201 if (!early) 407 !strcmp(v, cpu_devs[i]->c_ident[1]))) {
202 this_cpu = cpu_devs[i]; 408 this_cpu = cpu_devs[i];
203 return; 409 c->x86_vendor = this_cpu->c_x86_vendor;
204 } 410 return;
205 } 411 }
206 } 412 }
413
207 if (!printed) { 414 if (!printed) {
208 printed++; 415 printed++;
209 printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); 416 printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
210 printk(KERN_ERR "CPU: Your system may be unstable.\n"); 417 printk(KERN_ERR "CPU: Your system may be unstable.\n");
211 } 418 }
419
212 c->x86_vendor = X86_VENDOR_UNKNOWN; 420 c->x86_vendor = X86_VENDOR_UNKNOWN;
213 this_cpu = &default_cpu; 421 this_cpu = &default_cpu;
214} 422}
215 423
216 424void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
217static int __init x86_fxsr_setup(char *s)
218{
219 setup_clear_cpu_cap(X86_FEATURE_FXSR);
220 setup_clear_cpu_cap(X86_FEATURE_XMM);
221 return 1;
222}
223__setup("nofxsr", x86_fxsr_setup);
224
225
226static int __init x86_sep_setup(char *s)
227{
228 setup_clear_cpu_cap(X86_FEATURE_SEP);
229 return 1;
230}
231__setup("nosep", x86_sep_setup);
232
233
234/* Standard macro to see if a specific flag is changeable */
235static inline int flag_is_changeable_p(u32 flag)
236{
237 u32 f1, f2;
238
239 asm("pushfl\n\t"
240 "pushfl\n\t"
241 "popl %0\n\t"
242 "movl %0,%1\n\t"
243 "xorl %2,%0\n\t"
244 "pushl %0\n\t"
245 "popfl\n\t"
246 "pushfl\n\t"
247 "popl %0\n\t"
248 "popfl\n\t"
249 : "=&r" (f1), "=&r" (f2)
250 : "ir" (flag));
251
252 return ((f1^f2) & flag) != 0;
253}
254
255
256/* Probe for the CPUID instruction */
257static int __cpuinit have_cpuid_p(void)
258{
259 return flag_is_changeable_p(X86_EFLAGS_ID);
260}
261
262void __init cpu_detect(struct cpuinfo_x86 *c)
263{ 425{
264 /* Get vendor name */ 426 /* Get vendor name */
265 cpuid(0x00000000, (unsigned int *)&c->cpuid_level, 427 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -268,50 +430,87 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
268 (unsigned int *)&c->x86_vendor_id[4]); 430 (unsigned int *)&c->x86_vendor_id[4]);
269 431
270 c->x86 = 4; 432 c->x86 = 4;
433 /* Intel-defined flags: level 0x00000001 */
271 if (c->cpuid_level >= 0x00000001) { 434 if (c->cpuid_level >= 0x00000001) {
272 u32 junk, tfms, cap0, misc; 435 u32 junk, tfms, cap0, misc;
273 cpuid(0x00000001, &tfms, &misc, &junk, &cap0); 436 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
274 c->x86 = (tfms >> 8) & 15; 437 c->x86 = (tfms >> 8) & 0xf;
275 c->x86_model = (tfms >> 4) & 15; 438 c->x86_model = (tfms >> 4) & 0xf;
439 c->x86_mask = tfms & 0xf;
276 if (c->x86 == 0xf) 440 if (c->x86 == 0xf)
277 c->x86 += (tfms >> 20) & 0xff; 441 c->x86 += (tfms >> 20) & 0xff;
278 if (c->x86 >= 0x6) 442 if (c->x86 >= 0x6)
279 c->x86_model += ((tfms >> 16) & 0xF) << 4; 443 c->x86_model += ((tfms >> 16) & 0xf) << 4;
280 c->x86_mask = tfms & 15;
281 if (cap0 & (1<<19)) { 444 if (cap0 & (1<<19)) {
282 c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
283 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; 445 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
446 c->x86_cache_alignment = c->x86_clflush_size;
284 } 447 }
285 } 448 }
286} 449}
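A worked example of the family/model decode above: for a CPUID(1).EAX of 0x000006fb (just an example value) the extended model nibble is folded into bits 4-7 of the model:

#include <stdio.h>

int main(void)
{
	unsigned int tfms = 0x000006fb;	/* example CPUID(1).EAX */
	unsigned int x86 = (tfms >> 8) & 0xf;
	unsigned int model = (tfms >> 4) & 0xf;
	unsigned int mask = tfms & 0xf;

	if (x86 == 0xf)
		x86 += (tfms >> 20) & 0xff;		/* extended family */
	if (x86 >= 0x6)
		model += ((tfms >> 16) & 0xf) << 4;	/* extended model */
	printf("family %u, model %u, stepping %u\n", x86, model, mask);
	return 0;
}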
287static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) 450
451static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
288{ 452{
289 u32 tfms, xlvl; 453 u32 tfms, xlvl;
290 unsigned int ebx; 454 u32 ebx;
291 455
292 memset(&c->x86_capability, 0, sizeof c->x86_capability); 456 /* Intel-defined flags: level 0x00000001 */
293 if (have_cpuid_p()) { 457 if (c->cpuid_level >= 0x00000001) {
294 /* Intel-defined flags: level 0x00000001 */ 458 u32 capability, excap;
295 if (c->cpuid_level >= 0x00000001) { 459 cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
296 u32 capability, excap; 460 c->x86_capability[0] = capability;
297 cpuid(0x00000001, &tfms, &ebx, &excap, &capability); 461 c->x86_capability[4] = excap;
298 c->x86_capability[0] = capability; 462 }
299 c->x86_capability[4] = excap;
300 }
301 463
302 /* AMD-defined flags: level 0x80000001 */ 464 /* AMD-defined flags: level 0x80000001 */
303 xlvl = cpuid_eax(0x80000000); 465 xlvl = cpuid_eax(0x80000000);
304 if ((xlvl & 0xffff0000) == 0x80000000) { 466 c->extended_cpuid_level = xlvl;
305 if (xlvl >= 0x80000001) { 467 if ((xlvl & 0xffff0000) == 0x80000000) {
306 c->x86_capability[1] = cpuid_edx(0x80000001); 468 if (xlvl >= 0x80000001) {
307 c->x86_capability[6] = cpuid_ecx(0x80000001); 469 c->x86_capability[1] = cpuid_edx(0x80000001);
308 } 470 c->x86_capability[6] = cpuid_ecx(0x80000001);
309 } 471 }
472 }
473
474#ifdef CONFIG_X86_64
475 if (c->extended_cpuid_level >= 0x80000008) {
476 u32 eax = cpuid_eax(0x80000008);
310 477
478 c->x86_virt_bits = (eax >> 8) & 0xff;
479 c->x86_phys_bits = eax & 0xff;
311 } 480 }
481#endif
482
483 if (c->extended_cpuid_level >= 0x80000007)
484 c->x86_power = cpuid_edx(0x80000007);
312 485
313} 486}
314 487
488static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
489{
490#ifdef CONFIG_X86_32
491 int i;
492
493 /*
494 * First of all, decide if this is a 486 or higher
495 * It's a 486 if we can modify the AC flag
496 */
497 if (flag_is_changeable_p(X86_EFLAGS_AC))
498 c->x86 = 4;
499 else
500 c->x86 = 3;
501
502 for (i = 0; i < X86_VENDOR_NUM; i++)
503 if (cpu_devs[i] && cpu_devs[i]->c_identify) {
504 c->x86_vendor_id[0] = 0;
505 cpu_devs[i]->c_identify(c);
506 if (c->x86_vendor_id[0]) {
507 get_cpu_vendor(c);
508 break;
509 }
510 }
511#endif
512}
513
315/* 514/*
316 * Do minimum CPU detection early. 515 * Do minimum CPU detection early.
317 * Fields really needed: vendor, cpuid_level, family, model, mask, 516 * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -321,25 +520,61 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
321 * WARNING: this function is only called on the BP. Don't add code here 520 * WARNING: this function is only called on the BP. Don't add code here
322 * that is supposed to run on all CPUs. 521 * that is supposed to run on all CPUs.
323 */ 522 */
324static void __init early_cpu_detect(void) 523static void __init early_identify_cpu(struct cpuinfo_x86 *c)
325{ 524{
326 struct cpuinfo_x86 *c = &boot_cpu_data; 525#ifdef CONFIG_X86_64
327 526 c->x86_clflush_size = 64;
328 c->x86_cache_alignment = 32; 527#else
329 c->x86_clflush_size = 32; 528 c->x86_clflush_size = 32;
529#endif
530 c->x86_cache_alignment = c->x86_clflush_size;
531
532 memset(&c->x86_capability, 0, sizeof c->x86_capability);
533 c->extended_cpuid_level = 0;
534
535 if (!have_cpuid_p())
536 identify_cpu_without_cpuid(c);
330 537
538	/* Cyrix could have cpuid enabled via c_identify() */
331 if (!have_cpuid_p()) 539 if (!have_cpuid_p())
332 return; 540 return;
333 541
334 cpu_detect(c); 542 cpu_detect(c);
335 543
336 get_cpu_vendor(c, 1); 544 get_cpu_vendor(c);
545
546 get_cpu_cap(c);
337 547
338 early_get_cap(c); 548 if (this_cpu->c_early_init)
549 this_cpu->c_early_init(c);
339 550
340 if (c->x86_vendor != X86_VENDOR_UNKNOWN && 551 validate_pat_support(c);
341 cpu_devs[c->x86_vendor]->c_early_init) 552}
342 cpu_devs[c->x86_vendor]->c_early_init(c); 553
554void __init early_cpu_init(void)
555{
556 struct cpu_dev **cdev;
557 int count = 0;
558
559 printk("KERNEL supported cpus:\n");
560 for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
561 struct cpu_dev *cpudev = *cdev;
562 unsigned int j;
563
564 if (count >= X86_VENDOR_NUM)
565 break;
566 cpu_devs[count] = cpudev;
567 count++;
568
569 for (j = 0; j < 2; j++) {
570 if (!cpudev->c_ident[j])
571 continue;
572 printk(" %s %s\n", cpudev->c_vendor,
573 cpudev->c_ident[j]);
574 }
575 }
576
577 early_identify_cpu(&boot_cpu_data);
343} 578}
344 579
345/* 580/*
@@ -357,86 +592,41 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
357 592
358static void __cpuinit generic_identify(struct cpuinfo_x86 *c) 593static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
359{ 594{
360 u32 tfms, xlvl; 595 c->extended_cpuid_level = 0;
361 unsigned int ebx;
362
363 if (have_cpuid_p()) {
364 /* Get vendor name */
365 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
366 (unsigned int *)&c->x86_vendor_id[0],
367 (unsigned int *)&c->x86_vendor_id[8],
368 (unsigned int *)&c->x86_vendor_id[4]);
369
370 get_cpu_vendor(c, 0);
371 /* Initialize the standard set of capabilities */
372 /* Note that the vendor-specific code below might override */
373 /* Intel-defined flags: level 0x00000001 */
374 if (c->cpuid_level >= 0x00000001) {
375 u32 capability, excap;
376 cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
377 c->x86_capability[0] = capability;
378 c->x86_capability[4] = excap;
379 c->x86 = (tfms >> 8) & 15;
380 c->x86_model = (tfms >> 4) & 15;
381 if (c->x86 == 0xf)
382 c->x86 += (tfms >> 20) & 0xff;
383 if (c->x86 >= 0x6)
384 c->x86_model += ((tfms >> 16) & 0xF) << 4;
385 c->x86_mask = tfms & 15;
386 c->initial_apicid = (ebx >> 24) & 0xFF;
387#ifdef CONFIG_X86_HT
388 c->apicid = phys_pkg_id(c->initial_apicid, 0);
389 c->phys_proc_id = c->initial_apicid;
390#else
391 c->apicid = c->initial_apicid;
392#endif
393 if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
394 c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
395 } else {
396 /* Have CPUID level 0 only - unheard of */
397 c->x86 = 4;
398 }
399 596
400 /* AMD-defined flags: level 0x80000001 */ 597 if (!have_cpuid_p())
401 xlvl = cpuid_eax(0x80000000); 598 identify_cpu_without_cpuid(c);
402 if ((xlvl & 0xffff0000) == 0x80000000) {
403 if (xlvl >= 0x80000001) {
404 c->x86_capability[1] = cpuid_edx(0x80000001);
405 c->x86_capability[6] = cpuid_ecx(0x80000001);
406 }
407 if (xlvl >= 0x80000004)
408 get_model_name(c); /* Default name */
409 }
410 599
411	init_scattered_cpuid_features(c); 600 /* Cyrix could have cpuid enabled via c_identify() */
412 detect_nopl(c); 601 if (!have_cpuid_p())
413 } 602 return;
414}
415 603
416static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 604 cpu_detect(c);
417{
418 if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
419 /* Disable processor serial number */
420 unsigned long lo, hi;
421 rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
422 lo |= 0x200000;
423 wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
424 printk(KERN_NOTICE "CPU serial number disabled.\n");
425 clear_cpu_cap(c, X86_FEATURE_PN);
426 605
427 /* Disabling the serial number may affect the cpuid level */ 606 get_cpu_vendor(c);
428 c->cpuid_level = cpuid_eax(0);
429 }
430}
431 607
432static int __init x86_serial_nr_setup(char *s) 608 get_cpu_cap(c);
433{ 609
434 disable_x86_serial_nr = 0; 610 if (c->cpuid_level >= 0x00000001) {
435 return 1; 611 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
436} 612#ifdef CONFIG_X86_32
437__setup("serialnumber", x86_serial_nr_setup); 613# ifdef CONFIG_X86_HT
614 c->apicid = phys_pkg_id(c->initial_apicid, 0);
615# else
616 c->apicid = c->initial_apicid;
617# endif
618#endif
438 619
620#ifdef CONFIG_X86_HT
621 c->phys_proc_id = c->initial_apicid;
622#endif
623 }
439 624
625 get_model_name(c); /* Default name */
626
627 init_scattered_cpuid_features(c);
628 detect_nopl(c);
629}
440 630
441/* 631/*
442 * This does the hard work of actually picking apart the CPU stuff... 632 * This does the hard work of actually picking apart the CPU stuff...
@@ -448,30 +638,29 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
448 c->loops_per_jiffy = loops_per_jiffy; 638 c->loops_per_jiffy = loops_per_jiffy;
449 c->x86_cache_size = -1; 639 c->x86_cache_size = -1;
450 c->x86_vendor = X86_VENDOR_UNKNOWN; 640 c->x86_vendor = X86_VENDOR_UNKNOWN;
451 c->cpuid_level = -1; /* CPUID not detected */
452 c->x86_model = c->x86_mask = 0; /* So far unknown... */ 641 c->x86_model = c->x86_mask = 0; /* So far unknown... */
453 c->x86_vendor_id[0] = '\0'; /* Unset */ 642 c->x86_vendor_id[0] = '\0'; /* Unset */
454 c->x86_model_id[0] = '\0'; /* Unset */ 643 c->x86_model_id[0] = '\0'; /* Unset */
455 c->x86_max_cores = 1; 644 c->x86_max_cores = 1;
645 c->x86_coreid_bits = 0;
646#ifdef CONFIG_X86_64
647 c->x86_clflush_size = 64;
648#else
649 c->cpuid_level = -1; /* CPUID not detected */
456 c->x86_clflush_size = 32; 650 c->x86_clflush_size = 32;
651#endif
652 c->x86_cache_alignment = c->x86_clflush_size;
457 memset(&c->x86_capability, 0, sizeof c->x86_capability); 653 memset(&c->x86_capability, 0, sizeof c->x86_capability);
458 654
459 if (!have_cpuid_p()) {
460 /*
461 * First of all, decide if this is a 486 or higher
462 * It's a 486 if we can modify the AC flag
463 */
464 if (flag_is_changeable_p(X86_EFLAGS_AC))
465 c->x86 = 4;
466 else
467 c->x86 = 3;
468 }
469
470 generic_identify(c); 655 generic_identify(c);
471 656
472 if (this_cpu->c_identify) 657 if (this_cpu->c_identify)
473 this_cpu->c_identify(c); 658 this_cpu->c_identify(c);
474 659
660#ifdef CONFIG_X86_64
661 c->apicid = phys_pkg_id(0);
662#endif
663
475 /* 664 /*
476 * Vendor-specific initialization. In this section we 665 * Vendor-specific initialization. In this section we
477 * canonicalize the feature flags, meaning if there are 666 * canonicalize the feature flags, meaning if there are
@@ -505,6 +694,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
505 c->x86, c->x86_model); 694 c->x86, c->x86_model);
506 } 695 }
507 696
697#ifdef CONFIG_X86_64
698 detect_ht(c);
699#endif
700
508 /* 701 /*
509 * On SMP, boot_cpu_data holds the common feature set between 702 * On SMP, boot_cpu_data holds the common feature set between
510 * all CPUs; so make sure that we indicate which features are 703 * all CPUs; so make sure that we indicate which features are
@@ -513,7 +706,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
513 */ 706 */
514 if (c != &boot_cpu_data) { 707 if (c != &boot_cpu_data) {
515 /* AND the already accumulated flags with these */ 708 /* AND the already accumulated flags with these */
516 for (i = 0 ; i < NCAPINTS ; i++) 709 for (i = 0; i < NCAPINTS; i++)
517 boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; 710 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
518 } 711 }
519 712
@@ -521,72 +714,91 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
521 for (i = 0; i < NCAPINTS; i++) 714 for (i = 0; i < NCAPINTS; i++)
522 c->x86_capability[i] &= ~cleared_cpu_caps[i]; 715 c->x86_capability[i] &= ~cleared_cpu_caps[i];
523 716
717#ifdef CONFIG_X86_MCE
524 /* Init Machine Check Exception if available. */ 718 /* Init Machine Check Exception if available. */
525 mcheck_init(c); 719 mcheck_init(c);
720#endif
526 721
527 select_idle_routine(c); 722 select_idle_routine(c);
723
724#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
725 numa_add_cpu(smp_processor_id());
726#endif
528} 727}
529 728
729#ifdef CONFIG_X86_64
730static void vgetcpu_set_mode(void)
731{
732 if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
733 vgetcpu_mode = VGETCPU_RDTSCP;
734 else
735 vgetcpu_mode = VGETCPU_LSL;
736}
737#endif
738
 void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
+#ifdef CONFIG_X86_32
 	sysenter_setup();
 	enable_sep_cpu();
+#else
+	vgetcpu_set_mode();
+#endif
 }
 
 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 {
 	BUG_ON(c == &boot_cpu_data);
 	identify_cpu(c);
+#ifdef CONFIG_X86_32
 	enable_sep_cpu();
+#endif
 	mtrr_ap_init();
 }
 
-#ifdef CONFIG_X86_HT
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-	int index_msb, core_bits;
-
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-		return;
-
-	smp_num_siblings = (ebx & 0xff0000) >> 16;
-
-	if (smp_num_siblings == 1) {
-		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
-
-		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of the "
-					"siblings %d", smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
-
-		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-
-		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-		       c->phys_proc_id);
-
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
-		index_msb = get_count_order(smp_num_siblings) ;
-
-		core_bits = get_count_order(c->x86_max_cores);
-
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-					       ((1 << core_bits) - 1);
-
-		if (c->x86_max_cores > 1)
-			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-			       c->cpu_core_id);
-	}
-}
-#endif
+struct msr_range {
+	unsigned	min;
+	unsigned	max;
+};
+
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
+
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+		}
+	}
+}
+
+static int show_msr __cpuinitdata;
+static __init int setup_show_msr(char *arg)
+{
+	int num;
+
+	get_option(&arg, &num);
+
+	if (num > 0)
+		show_msr = num;
+	return 1;
+}
+__setup("show_msr=", setup_show_msr);
 
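For context, the print_cpu_msr() loop added above has a close user-space analogue: the msr driver (CONFIG_X86_MSR) exposes /dev/cpu/N/msr, where an 8-byte pread() at the file offset equal to the MSR index returns that MSR, and unimplemented MSRs fail with EIO much like rdmsrl_amd_safe(). A minimal sketch, assuming the driver is loaded and root privileges; the ranges are the AMD ones from msr_range_array:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static const struct { uint32_t min, max; } range[] = {
		{ 0x00000000, 0x00000418 },
		{ 0xc0000000, 0xc000040b },
	};
	uint64_t val;
	uint32_t idx;
	unsigned int i;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return 1;
	for (i = 0; i < sizeof(range) / sizeof(range[0]); i++)
		for (idx = range[i].min; idx < range[i].max; idx++)
			if (pread(fd, &val, 8, idx) == 8)	/* EIO -> skip */
				printf(" MSR%08x: %016llx\n", idx,
				       (unsigned long long)val);
	close(fd);
	return 0;
}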
 static __init int setup_noclflush(char *arg)
 {
@@ -604,18 +816,26 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 	else if (c->cpuid_level >= 0)
 		vendor = c->x86_vendor_id;
 
-	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-		printk("%s ", vendor);
+	if (vendor && !strstr(c->x86_model_id, vendor))
+		printk(KERN_CONT "%s ", vendor);
 
-	if (!c->x86_model_id[0])
-		printk("%d86", c->x86);
-	else
-		printk("%s", c->x86_model_id);
+	if (c->x86_model_id[0])
+		printk(KERN_CONT "%s", c->x86_model_id);
+	else
+		printk(KERN_CONT "%d86", c->x86);
 
 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(" stepping %02x\n", c->x86_mask);
+		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
+#else
+	if (show_msr)
+		print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
@@ -631,19 +851,89 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
+#ifdef CONFIG_X86_64
+struct x8664_pda **_cpu_pda __read_mostly;
+EXPORT_SYMBOL(_cpu_pda);
+
+struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
+
+char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
+
+void __cpuinit pda_init(int cpu)
+{
+	struct x8664_pda *pda = cpu_pda(cpu);
+
+	/* Set up data that may be needed in __get_free_pages early */
+	loadsegment(fs, 0);
+	loadsegment(gs, 0);
+	/* Memory clobbers used to order PDA accesses */
+	mb();
+	wrmsrl(MSR_GS_BASE, pda);
+	mb();
+
+	pda->cpunumber = cpu;
+	pda->irqcount = -1;
+	pda->kernelstack = (unsigned long)stack_thread_info() -
+				 PDA_STACKOFFSET + THREAD_SIZE;
+	pda->active_mm = &init_mm;
+	pda->mmu_state = 0;
+
+	if (cpu == 0) {
+		/* others are initialized in smpboot.c */
+		pda->pcurrent = &init_task;
+		pda->irqstackptr = boot_cpu_stack;
+		pda->irqstackptr += IRQSTACKSIZE - 64;
+	} else {
+		if (!pda->irqstackptr) {
+			pda->irqstackptr = (char *)
+				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
+			if (!pda->irqstackptr)
+				panic("cannot allocate irqstack for cpu %d",
+				      cpu);
+			pda->irqstackptr += IRQSTACKSIZE - 64;
+		}
+
+		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
+			pda->nodenumber = cpu_to_node(cpu);
+	}
+}
+
+char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
+			   DEBUG_STKSZ] __page_aligned_bss;
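The wrmsrl(MSR_GS_BASE, pda) sequence above is what lets the kernel reach the PDA with %gs-relative addressing afterwards. The same mechanism can be reproduced from user space on x86_64 Linux via arch_prctl(ARCH_SET_GS); the struct below is a made-up stand-in for the PDA, and the sketch assumes nothing else in the process uses %gs:

#define _GNU_SOURCE
#include <asm/prctl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct pda_demo {
	int cpunumber;
	int irqcount;
};

int main(void)
{
	struct pda_demo pda = { .cpunumber = 7, .irqcount = -1 };
	int cpu;

	if (syscall(SYS_arch_prctl, ARCH_SET_GS, &pda))
		return 1;
	/* movl %gs:0, %eax - pda.cpunumber, addressed via the GS base */
	__asm__ volatile("movl %%gs:0, %0" : "=r" (cpu));
	printf("cpunumber read through %%gs = %d\n", cpu);
	return 0;
}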
-void __init early_cpu_init(void)
-{
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start ;
-	     cvdev < __x86cpuvendor_end ;
-	     cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-
-	early_cpu_detect();
-	validate_pat_support(&boot_cpu_data);
-}
+
+extern asmlinkage void ignore_sysret(void);
+
+/* May not be marked __init: used by software suspend */
+void syscall_init(void)
+{
+	/*
+	 * LSTAR and STAR live in a bit strange symbiosis.
+	 * They both write to the same internal register. STAR allows to
+	 * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
+	 */
+	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
+	wrmsrl(MSR_LSTAR, system_call);
+	wrmsrl(MSR_CSTAR, ignore_sysret);
+
+#ifdef CONFIG_IA32_EMULATION
+	syscall32_cpu_init();
+#endif
+
+	/* Flags to clear on syscall */
+	wrmsrl(MSR_SYSCALL_MASK,
+	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
+}
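Worked example of the MSR_STAR packing in syscall_init(): SYSCALL loads CS from bits 47:32 (SS is that value plus 8), while 64-bit SYSRET loads CS from bits 63:48 plus 16. The selector constants below are the conventional x86_64 values for __KERNEL_CS and __USER32_CS and are an assumption of this sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t user32_cs = 0x23;	/* assumed __USER32_CS */
	uint64_t kernel_cs = 0x10;	/* assumed __KERNEL_CS */
	uint64_t star = (user32_cs << 48) | (kernel_cs << 32);

	printf("MSR_STAR   = %#018llx\n", (unsigned long long)star);
	/* SYSCALL: CS = STAR[47:32], SS = CS + 8 */
	printf("syscall CS = %#llx\n",
	       (unsigned long long)((star >> 32) & 0xffff));
	/* 64-bit SYSRET: CS = STAR[63:48] + 16, SS = STAR[63:48] + 8 */
	printf("sysret  CS = %#llx\n",
	       (unsigned long long)(((star >> 48) & 0xffff) + 16));
	return 0;
}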
 
+unsigned long kernel_eflags;
+
+/*
+ * Copies of the original ist values from the tss are only accessed during
+ * debugging, no special alignment required.
+ */
+DEFINE_PER_CPU(struct orig_ist, orig_ist);
+
+#else
+
 /* Make sure %fs is initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
@@ -651,25 +941,136 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	regs->fs = __KERNEL_PERCPU;
 	return regs;
 }
-
+#endif
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
-{
-	struct desc_ptr gdt_descr;
-
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
-	gdt_descr.size = GDT_SIZE - 1;
-	load_gdt(&gdt_descr);
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-}
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
  * and IDT. We reload them nevertheless, this function acts as a
  * 'CPU state barrier', nothing should get across.
+ * A lot of state is already set up in PDA init for 64 bit
  */
+#ifdef CONFIG_X86_64
+void __cpuinit cpu_init(void)
+{
+	int cpu = stack_smp_processor_id();
+	struct tss_struct *t = &per_cpu(init_tss, cpu);
+	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
+	unsigned long v;
+	char *estacks = NULL;
+	struct task_struct *me;
+	int i;
+
+	/* CPU 0 is initialised in head64.c */
+	if (cpu != 0)
+		pda_init(cpu);
+	else
+		estacks = boot_exception_stacks;
+
+	me = current;
+
+	if (cpu_test_and_set(cpu, cpu_initialized))
+		panic("CPU#%d already initialized!\n", cpu);
+
+	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+
+	/*
+	 * Initialize the per-CPU GDT with the boot GDT,
+	 * and set up the GDT descriptor:
+	 */
+
+	switch_to_new_gdt();
+	load_idt((const struct desc_ptr *)&idt_descr);
+
+	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+	syscall_init();
+
+	wrmsrl(MSR_FS_BASE, 0);
+	wrmsrl(MSR_KERNEL_GS_BASE, 0);
+	barrier();
+
+	check_efer();
+	if (cpu != 0 && x2apic)
+		enable_x2apic();
+
+	/*
+	 * set up and load the per-CPU TSS
+	 */
+	if (!orig_ist->ist[0]) {
+		static const unsigned int order[N_EXCEPTION_STACKS] = {
+		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
+		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+		};
+		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+			if (cpu) {
+				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
+				if (!estacks)
+					panic("Cannot allocate exception "
+					      "stack %ld %d\n", v, cpu);
+			}
+			estacks += PAGE_SIZE << order[v];
+			orig_ist->ist[v] = t->x86_tss.ist[v] =
+					(unsigned long)estacks;
+		}
+	}
+
+	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+	/*
+	 * <= is required because the CPU will access up to
+	 * 8 bits beyond the end of the IO permission bitmap.
+	 */
+	for (i = 0; i <= IO_BITMAP_LONGS; i++)
+		t->io_bitmap[i] = ~0UL;
+
+	atomic_inc(&init_mm.mm_count);
+	me->active_mm = &init_mm;
+	if (me->mm)
+		BUG();
+	enter_lazy_tlb(&init_mm, me);
+
+	load_sp0(t, &current->thread);
+	set_tss_desc(cpu, t);
+	load_TR_desc();
+	load_LDT(&init_mm.context);
+
+#ifdef CONFIG_KGDB
+	/*
+	 * If the kgdb is connected no debug regs should be altered. This
+	 * is only applicable when KGDB and a KGDB I/O module are built
+	 * into the kernel and you are using early debugging with
+	 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
+	 */
+	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
+		arch_kgdb_ops.correct_hw_break();
+	else {
+#endif
+	/*
+	 * Clear all 6 debug registers:
+	 */
+
+	set_debugreg(0UL, 0);
+	set_debugreg(0UL, 1);
+	set_debugreg(0UL, 2);
+	set_debugreg(0UL, 3);
+	set_debugreg(0UL, 6);
+	set_debugreg(0UL, 7);
+#ifdef CONFIG_KGDB
+	/* If the kgdb is connected no debug regs should be altered. */
+	}
+#endif
+
+	fpu_init();
+
+	raw_local_save_flags(kernel_eflags);
+
+	if (is_uv_system())
+		uv_cpu_init();
+}
+
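The order[] initializer in the 64-bit cpu_init() above leans on GCC's range-designator extension: every exception stack gets the default order, then the #DB slot alone is overridden (a later designator wins). A standalone illustration with made-up sizes:

#include <stdio.h>

#define N_STACKS	5
#define DEBUG_SLOT	3

static const unsigned int order[N_STACKS] = {
	[0 ... N_STACKS - 1] = 0,	/* one 4 KiB page each... */
	[DEBUG_SLOT] = 2,		/* ...but four pages for #DB */
};

int main(void)
{
	unsigned int v;

	for (v = 0; v < N_STACKS; v++)
		printf("stack %u: %lu bytes\n", v, 4096UL << order[v]);
	return 0;
}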
+#else
+
 void __cpuinit cpu_init(void)
 {
 	int cpu = smp_processor_id();
@@ -723,19 +1124,21 @@ void __cpuinit cpu_init(void)
 	/*
 	 * Force FPU initialization:
 	 */
-	current_thread_info()->status = 0;
+	if (cpu_has_xsave)
+		current_thread_info()->status = TS_XSAVE;
+	else
+		current_thread_info()->status = 0;
 	clear_used_math();
 	mxcsr_feature_mask_init();
-}
 
-#ifdef CONFIG_HOTPLUG_CPU
-void __cpuinit cpu_uninit(void)
-{
-	int cpu = raw_smp_processor_id();
-	cpu_clear(cpu, cpu_initialized);
+	/*
+	 * Boot processor to setup the FP and extended state context info.
+	 */
+	if (!smp_processor_id())
+		init_thread_xstate();
 
-	/* lazy TLB state */
-	per_cpu(cpu_tlbstate, cpu).state = 0;
-	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
-}
+	xsave_init();
+}
+
+
 #endif
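The cpu_has_xsave test introduced above maps to CPUID leaf 1: ECX bit 26 reports XSAVE hardware support and bit 27 (OSXSAVE) turns on once the OS sets CR4.OSXSAVE. A quick user-space probe with GCC's <cpuid.h> helper:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("XSAVE:   %s\n", (ecx & (1u << 26)) ? "yes" : "no");
	printf("OSXSAVE: %s\n", (ecx & (1u << 27)) ? "yes" : "no");
	return 0;
}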
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
deleted file mode 100644
index a11f5d4477cd..000000000000
--- a/arch/x86/kernel/cpu/common_64.c
+++ /dev/null
@@ -1,712 +0,0 @@
1#include <linux/init.h>
2#include <linux/kernel.h>
3#include <linux/sched.h>
4#include <linux/string.h>
5#include <linux/bootmem.h>
6#include <linux/bitops.h>
7#include <linux/module.h>
8#include <linux/kgdb.h>
9#include <linux/topology.h>
10#include <linux/delay.h>
11#include <linux/smp.h>
12#include <linux/percpu.h>
13#include <asm/i387.h>
14#include <asm/msr.h>
15#include <asm/io.h>
16#include <asm/linkage.h>
17#include <asm/mmu_context.h>
18#include <asm/mtrr.h>
19#include <asm/mce.h>
20#include <asm/pat.h>
21#include <asm/asm.h>
22#include <asm/numa.h>
23#ifdef CONFIG_X86_LOCAL_APIC
24#include <asm/mpspec.h>
25#include <asm/apic.h>
26#include <mach_apic.h>
27#endif
28#include <asm/pda.h>
29#include <asm/pgtable.h>
30#include <asm/processor.h>
31#include <asm/desc.h>
32#include <asm/atomic.h>
33#include <asm/proto.h>
34#include <asm/sections.h>
35#include <asm/setup.h>
36#include <asm/genapic.h>
37
38#include "cpu.h"
39
40/* We need valid kernel segments for data and code in long mode too
41 * IRET will check the segment types kkeil 2000/10/28
42 * Also sysret mandates a special GDT layout
43 */
44/* The TLS descriptors are currently at a different place compared to i386.
45 Hopefully nobody expects them at a fixed place (Wine?) */
46DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
47 [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
48 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
49 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
50 [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
51 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
52 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
53} };
54EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
55
56__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
57
58/* Current gdt points %fs at the "master" per-cpu area: after this,
59 * it's on the real one. */
60void switch_to_new_gdt(void)
61{
62 struct desc_ptr gdt_descr;
63
64 gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
65 gdt_descr.size = GDT_SIZE - 1;
66 load_gdt(&gdt_descr);
67}
68
69struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
70
71static void __cpuinit default_init(struct cpuinfo_x86 *c)
72{
73 display_cacheinfo(c);
74}
75
76static struct cpu_dev __cpuinitdata default_cpu = {
77 .c_init = default_init,
78 .c_vendor = "Unknown",
79};
80static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
81
82int __cpuinit get_model_name(struct cpuinfo_x86 *c)
83{
84 unsigned int *v;
85
86 if (c->extended_cpuid_level < 0x80000004)
87 return 0;
88
89 v = (unsigned int *) c->x86_model_id;
90 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
91 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
92 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
93 c->x86_model_id[48] = 0;
94 return 1;
95}
96
97
98void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
99{
100 unsigned int n, dummy, ebx, ecx, edx;
101
102 n = c->extended_cpuid_level;
103
104 if (n >= 0x80000005) {
105 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
106 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
107 "D cache %dK (%d bytes/line)\n",
108 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
109 c->x86_cache_size = (ecx>>24) + (edx>>24);
110 /* On K8 L1 TLB is inclusive, so don't count it */
111 c->x86_tlbsize = 0;
112 }
113
114 if (n >= 0x80000006) {
115 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
116 ecx = cpuid_ecx(0x80000006);
117 c->x86_cache_size = ecx >> 16;
118 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
119
120 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
121 c->x86_cache_size, ecx & 0xFF);
122 }
123}
124
125void __cpuinit detect_ht(struct cpuinfo_x86 *c)
126{
127#ifdef CONFIG_SMP
128 u32 eax, ebx, ecx, edx;
129 int index_msb, core_bits;
130
131 cpuid(1, &eax, &ebx, &ecx, &edx);
132
133
134 if (!cpu_has(c, X86_FEATURE_HT))
135 return;
136 if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
137 goto out;
138
139 smp_num_siblings = (ebx & 0xff0000) >> 16;
140
141 if (smp_num_siblings == 1) {
142 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
143 } else if (smp_num_siblings > 1) {
144
145 if (smp_num_siblings > NR_CPUS) {
146 printk(KERN_WARNING "CPU: Unsupported number of "
147 "siblings %d", smp_num_siblings);
148 smp_num_siblings = 1;
149 return;
150 }
151
152 index_msb = get_count_order(smp_num_siblings);
153 c->phys_proc_id = phys_pkg_id(index_msb);
154
155 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
156
157 index_msb = get_count_order(smp_num_siblings);
158
159 core_bits = get_count_order(c->x86_max_cores);
160
161 c->cpu_core_id = phys_pkg_id(index_msb) &
162 ((1 << core_bits) - 1);
163 }
164out:
165 if ((c->x86_max_cores * smp_num_siblings) > 1) {
166 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
167 c->phys_proc_id);
168 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
169 c->cpu_core_id);
170 }
171
172#endif
173}
174
175static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
176{
177 char *v = c->x86_vendor_id;
178 int i;
179 static int printed;
180
181 for (i = 0; i < X86_VENDOR_NUM; i++) {
182 if (cpu_devs[i]) {
183 if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
184 (cpu_devs[i]->c_ident[1] &&
185 !strcmp(v, cpu_devs[i]->c_ident[1]))) {
186 c->x86_vendor = i;
187 this_cpu = cpu_devs[i];
188 return;
189 }
190 }
191 }
192 if (!printed) {
193 printed++;
194 printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
195 printk(KERN_ERR "CPU: Your system may be unstable.\n");
196 }
197 c->x86_vendor = X86_VENDOR_UNKNOWN;
198}
199
200static void __init early_cpu_support_print(void)
201{
202 int i,j;
203 struct cpu_dev *cpu_devx;
204
205 printk("KERNEL supported cpus:\n");
206 for (i = 0; i < X86_VENDOR_NUM; i++) {
207 cpu_devx = cpu_devs[i];
208 if (!cpu_devx)
209 continue;
210 for (j = 0; j < 2; j++) {
211 if (!cpu_devx->c_ident[j])
212 continue;
213 printk(" %s %s\n", cpu_devx->c_vendor,
214 cpu_devx->c_ident[j]);
215 }
216 }
217}
218
219/*
220 * The NOPL instruction is supposed to exist on all CPUs with
221 * family >= 6, unfortunately, that's not true in practice because
222 * of early VIA chips and (more importantly) broken virtualizers that
223 * are not easy to detect. Hence, probe for it based on first
224 * principles.
225 *
226 * Note: no 64-bit chip is known to lack these, but put the code here
227 * for consistency with 32 bits, and to make it utterly trivial to
228 * diagnose the problem should it ever surface.
229 */
230static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
231{
232 const u32 nopl_signature = 0x888c53b1; /* Random number */
233 u32 has_nopl = nopl_signature;
234
235 clear_cpu_cap(c, X86_FEATURE_NOPL);
236 if (c->x86 >= 6) {
237 asm volatile("\n"
238 "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
239 "2:\n"
240 " .section .fixup,\"ax\"\n"
241 "3: xor %0,%0\n"
242 " jmp 2b\n"
243 " .previous\n"
244 _ASM_EXTABLE(1b,3b)
245 : "+a" (has_nopl));
246
247 if (has_nopl == nopl_signature)
248 set_cpu_cap(c, X86_FEATURE_NOPL);
249 }
250}
251
252static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
253
254void __init early_cpu_init(void)
255{
256 struct cpu_vendor_dev *cvdev;
257
258 for (cvdev = __x86cpuvendor_start ;
259 cvdev < __x86cpuvendor_end ;
260 cvdev++)
261 cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
262 early_cpu_support_print();
263 early_identify_cpu(&boot_cpu_data);
264}
265
266/* Do some early cpuid on the boot CPU to get some parameters that are
267 needed before check_bugs. Everything advanced is in identify_cpu
268 below. */
269static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
270{
271 u32 tfms, xlvl;
272
273 c->loops_per_jiffy = loops_per_jiffy;
274 c->x86_cache_size = -1;
275 c->x86_vendor = X86_VENDOR_UNKNOWN;
276 c->x86_model = c->x86_mask = 0; /* So far unknown... */
277 c->x86_vendor_id[0] = '\0'; /* Unset */
278 c->x86_model_id[0] = '\0'; /* Unset */
279 c->x86_clflush_size = 64;
280 c->x86_cache_alignment = c->x86_clflush_size;
281 c->x86_max_cores = 1;
282 c->x86_coreid_bits = 0;
283 c->extended_cpuid_level = 0;
284 memset(&c->x86_capability, 0, sizeof c->x86_capability);
285
286 /* Get vendor name */
287 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
288 (unsigned int *)&c->x86_vendor_id[0],
289 (unsigned int *)&c->x86_vendor_id[8],
290 (unsigned int *)&c->x86_vendor_id[4]);
291
292 get_cpu_vendor(c);
293
294 /* Initialize the standard set of capabilities */
295 /* Note that the vendor-specific code below might override */
296
297 /* Intel-defined flags: level 0x00000001 */
298 if (c->cpuid_level >= 0x00000001) {
299 __u32 misc;
300 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
301 &c->x86_capability[0]);
302 c->x86 = (tfms >> 8) & 0xf;
303 c->x86_model = (tfms >> 4) & 0xf;
304 c->x86_mask = tfms & 0xf;
305 if (c->x86 == 0xf)
306 c->x86 += (tfms >> 20) & 0xff;
307 if (c->x86 >= 0x6)
308 c->x86_model += ((tfms >> 16) & 0xF) << 4;
309 if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
310 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
311 } else {
312 /* Have CPUID level 0 only - unheard of */
313 c->x86 = 4;
314 }
315
316 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
317#ifdef CONFIG_SMP
318 c->phys_proc_id = c->initial_apicid;
319#endif
320 /* AMD-defined flags: level 0x80000001 */
321 xlvl = cpuid_eax(0x80000000);
322 c->extended_cpuid_level = xlvl;
323 if ((xlvl & 0xffff0000) == 0x80000000) {
324 if (xlvl >= 0x80000001) {
325 c->x86_capability[1] = cpuid_edx(0x80000001);
326 c->x86_capability[6] = cpuid_ecx(0x80000001);
327 }
328 if (xlvl >= 0x80000004)
329 get_model_name(c); /* Default name */
330 }
331
332 /* Transmeta-defined flags: level 0x80860001 */
333 xlvl = cpuid_eax(0x80860000);
334 if ((xlvl & 0xffff0000) == 0x80860000) {
335 /* Don't set x86_cpuid_level here for now to not confuse. */
336 if (xlvl >= 0x80860001)
337 c->x86_capability[2] = cpuid_edx(0x80860001);
338 }
339
340 if (c->extended_cpuid_level >= 0x80000007)
341 c->x86_power = cpuid_edx(0x80000007);
342
343 if (c->extended_cpuid_level >= 0x80000008) {
344 u32 eax = cpuid_eax(0x80000008);
345
346 c->x86_virt_bits = (eax >> 8) & 0xff;
347 c->x86_phys_bits = eax & 0xff;
348 }
349
350 detect_nopl(c);
351
352 if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
353 cpu_devs[c->x86_vendor]->c_early_init)
354 cpu_devs[c->x86_vendor]->c_early_init(c);
355
356 validate_pat_support(c);
357}
358
359/*
360 * This does the hard work of actually picking apart the CPU stuff...
361 */
362static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
363{
364 int i;
365
366 early_identify_cpu(c);
367
368 init_scattered_cpuid_features(c);
369
370 c->apicid = phys_pkg_id(0);
371
372 /*
373 * Vendor-specific initialization. In this section we
374 * canonicalize the feature flags, meaning if there are
375 * features a certain CPU supports which CPUID doesn't
376 * tell us, CPUID claiming incorrect flags, or other bugs,
377 * we handle them here.
378 *
379 * At the end of this section, c->x86_capability better
380 * indicate the features this CPU genuinely supports!
381 */
382 if (this_cpu->c_init)
383 this_cpu->c_init(c);
384
385 detect_ht(c);
386
387 /*
388 * On SMP, boot_cpu_data holds the common feature set between
389 * all CPUs; so make sure that we indicate which features are
390 * common between the CPUs. The first time this routine gets
391 * executed, c == &boot_cpu_data.
392 */
393 if (c != &boot_cpu_data) {
394 /* AND the already accumulated flags with these */
395 for (i = 0; i < NCAPINTS; i++)
396 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
397 }
398
399	/* Clear all flags overridden by options */
400 for (i = 0; i < NCAPINTS; i++)
401 c->x86_capability[i] &= ~cleared_cpu_caps[i];
402
403#ifdef CONFIG_X86_MCE
404 mcheck_init(c);
405#endif
406 select_idle_routine(c);
407
408#ifdef CONFIG_NUMA
409 numa_add_cpu(smp_processor_id());
410#endif
411
412}
413
414void __cpuinit identify_boot_cpu(void)
415{
416 identify_cpu(&boot_cpu_data);
417}
418
419void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
420{
421 BUG_ON(c == &boot_cpu_data);
422 identify_cpu(c);
423 mtrr_ap_init();
424}
425
426static __init int setup_noclflush(char *arg)
427{
428 setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
429 return 1;
430}
431__setup("noclflush", setup_noclflush);
432
433void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
434{
435 if (c->x86_model_id[0])
436 printk(KERN_CONT "%s", c->x86_model_id);
437
438 if (c->x86_mask || c->cpuid_level >= 0)
439 printk(KERN_CONT " stepping %02x\n", c->x86_mask);
440 else
441 printk(KERN_CONT "\n");
442}
443
444static __init int setup_disablecpuid(char *arg)
445{
446 int bit;
447 if (get_option(&arg, &bit) && bit < NCAPINTS*32)
448 setup_clear_cpu_cap(bit);
449 else
450 return 0;
451 return 1;
452}
453__setup("clearcpuid=", setup_disablecpuid);
454
455cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
456
457struct x8664_pda **_cpu_pda __read_mostly;
458EXPORT_SYMBOL(_cpu_pda);
459
460struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
461
462char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
463
464unsigned long __supported_pte_mask __read_mostly = ~0UL;
465EXPORT_SYMBOL_GPL(__supported_pte_mask);
466
467static int do_not_nx __cpuinitdata;
468
469/* noexec=on|off
470Control non executable mappings for 64bit processes.
471
472on Enable(default)
473off Disable
474*/
475static int __init nonx_setup(char *str)
476{
477 if (!str)
478 return -EINVAL;
479 if (!strncmp(str, "on", 2)) {
480 __supported_pte_mask |= _PAGE_NX;
481 do_not_nx = 0;
482 } else if (!strncmp(str, "off", 3)) {
483 do_not_nx = 1;
484 __supported_pte_mask &= ~_PAGE_NX;
485 }
486 return 0;
487}
488early_param("noexec", nonx_setup);
489
490int force_personality32;
491
492/* noexec32=on|off
493Control non executable heap for 32bit processes.
494To control the stack too use noexec=off
495
496on PROT_READ does not imply PROT_EXEC for 32bit processes (default)
497off PROT_READ implies PROT_EXEC
498*/
499static int __init nonx32_setup(char *str)
500{
501 if (!strcmp(str, "on"))
502 force_personality32 &= ~READ_IMPLIES_EXEC;
503 else if (!strcmp(str, "off"))
504 force_personality32 |= READ_IMPLIES_EXEC;
505 return 1;
506}
507__setup("noexec32=", nonx32_setup);
508
509void pda_init(int cpu)
510{
511 struct x8664_pda *pda = cpu_pda(cpu);
512
513	/* Set up data that may be needed in __get_free_pages early */
514 loadsegment(fs, 0);
515 loadsegment(gs, 0);
516	/* Memory clobbers used to order PDA accesses */
517 mb();
518 wrmsrl(MSR_GS_BASE, pda);
519 mb();
520
521 pda->cpunumber = cpu;
522 pda->irqcount = -1;
523 pda->kernelstack = (unsigned long)stack_thread_info() -
524 PDA_STACKOFFSET + THREAD_SIZE;
525 pda->active_mm = &init_mm;
526 pda->mmu_state = 0;
527
528 if (cpu == 0) {
529 /* others are initialized in smpboot.c */
530 pda->pcurrent = &init_task;
531 pda->irqstackptr = boot_cpu_stack;
532 pda->irqstackptr += IRQSTACKSIZE - 64;
533 } else {
534 if (!pda->irqstackptr) {
535 pda->irqstackptr = (char *)
536 __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
537 if (!pda->irqstackptr)
538 panic("cannot allocate irqstack for cpu %d",
539 cpu);
540 pda->irqstackptr += IRQSTACKSIZE - 64;
541 }
542
543 if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
544 pda->nodenumber = cpu_to_node(cpu);
545 }
546}
547
548char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
549 DEBUG_STKSZ] __page_aligned_bss;
550
551extern asmlinkage void ignore_sysret(void);
552
553/* May not be marked __init: used by software suspend */
554void syscall_init(void)
555{
556 /*
557 * LSTAR and STAR live in a bit strange symbiosis.
558 * They both write to the same internal register. STAR allows to
559 * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
560 */
561 wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
562 wrmsrl(MSR_LSTAR, system_call);
563 wrmsrl(MSR_CSTAR, ignore_sysret);
564
565#ifdef CONFIG_IA32_EMULATION
566 syscall32_cpu_init();
567#endif
568
569 /* Flags to clear on syscall */
570 wrmsrl(MSR_SYSCALL_MASK,
571 X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
572}
573
574void __cpuinit check_efer(void)
575{
576 unsigned long efer;
577
578 rdmsrl(MSR_EFER, efer);
579 if (!(efer & EFER_NX) || do_not_nx)
580 __supported_pte_mask &= ~_PAGE_NX;
581}
582
583unsigned long kernel_eflags;
584
585/*
586 * Copies of the original ist values from the tss are only accessed during
587 * debugging, no special alignment required.
588 */
589DEFINE_PER_CPU(struct orig_ist, orig_ist);
590
591/*
592 * cpu_init() initializes state that is per-CPU. Some data is already
593 * initialized (naturally) in the bootstrap process, such as the GDT
594 * and IDT. We reload them nevertheless, this function acts as a
595 * 'CPU state barrier', nothing should get across.
596 * A lot of state is already set up in PDA init.
597 */
598void __cpuinit cpu_init(void)
599{
600 int cpu = stack_smp_processor_id();
601 struct tss_struct *t = &per_cpu(init_tss, cpu);
602 struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
603 unsigned long v;
604 char *estacks = NULL;
605 struct task_struct *me;
606 int i;
607
608 /* CPU 0 is initialised in head64.c */
609 if (cpu != 0)
610 pda_init(cpu);
611 else
612 estacks = boot_exception_stacks;
613
614 me = current;
615
616 if (cpu_test_and_set(cpu, cpu_initialized))
617 panic("CPU#%d already initialized!\n", cpu);
618
619 printk(KERN_INFO "Initializing CPU#%d\n", cpu);
620
621 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
622
623 /*
624 * Initialize the per-CPU GDT with the boot GDT,
625 * and set up the GDT descriptor:
626 */
627
628 switch_to_new_gdt();
629 load_idt((const struct desc_ptr *)&idt_descr);
630
631 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
632 syscall_init();
633
634 wrmsrl(MSR_FS_BASE, 0);
635 wrmsrl(MSR_KERNEL_GS_BASE, 0);
636 barrier();
637
638 check_efer();
639
640 /*
641 * set up and load the per-CPU TSS
642 */
643 if (!orig_ist->ist[0]) {
644 static const unsigned int order[N_EXCEPTION_STACKS] = {
645 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
646 [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
647 };
648 for (v = 0; v < N_EXCEPTION_STACKS; v++) {
649 if (cpu) {
650 estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
651 if (!estacks)
652 panic("Cannot allocate exception "
653 "stack %ld %d\n", v, cpu);
654 }
655 estacks += PAGE_SIZE << order[v];
656 orig_ist->ist[v] = t->x86_tss.ist[v] =
657 (unsigned long)estacks;
658 }
659 }
660
661 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
662 /*
663 * <= is required because the CPU will access up to
664 * 8 bits beyond the end of the IO permission bitmap.
665 */
666 for (i = 0; i <= IO_BITMAP_LONGS; i++)
667 t->io_bitmap[i] = ~0UL;
668
669 atomic_inc(&init_mm.mm_count);
670 me->active_mm = &init_mm;
671 if (me->mm)
672 BUG();
673 enter_lazy_tlb(&init_mm, me);
674
675 load_sp0(t, &current->thread);
676 set_tss_desc(cpu, t);
677 load_TR_desc();
678 load_LDT(&init_mm.context);
679
680#ifdef CONFIG_KGDB
681 /*
682 * If the kgdb is connected no debug regs should be altered. This
683 * is only applicable when KGDB and a KGDB I/O module are built
684 * into the kernel and you are using early debugging with
685 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
686 */
687 if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
688 arch_kgdb_ops.correct_hw_break();
689 else {
690#endif
691 /*
692 * Clear all 6 debug registers:
693 */
694
695 set_debugreg(0UL, 0);
696 set_debugreg(0UL, 1);
697 set_debugreg(0UL, 2);
698 set_debugreg(0UL, 3);
699 set_debugreg(0UL, 6);
700 set_debugreg(0UL, 7);
701#ifdef CONFIG_KGDB
702 /* If the kgdb is connected no debug regs should be altered. */
703 }
704#endif
705
706 fpu_init();
707
708 raw_local_save_flags(kernel_eflags);
709
710 if (is_uv_system())
711 uv_cpu_init();
712}
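The detect_nopl() probe in the file removed above relies on the kernel's exception-fixup machinery to survive an invalid opcode. The same first-principles test can be run in user space by catching SIGILL instead — a sketch using the identical 0f 1f c0 encoding; the jump-buffer dance is this example's own scaffolding:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf jb;

static void on_sigill(int sig)
{
	(void)sig;
	siglongjmp(jb, 1);
}

int main(void)
{
	signal(SIGILL, on_sigill);
	if (sigsetjmp(jb, 1) == 0) {
		__asm__ volatile(".byte 0x0f, 0x1f, 0xc0");	/* nopl %eax */
		puts("NOPL executes: X86_FEATURE_NOPL would be set");
	} else {
		puts("NOPL faults: feature stays cleared");
	}
	return 0;
}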
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 4d894e8565fe..de4094a39210 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -21,23 +21,16 @@ struct cpu_dev {
 	void		(*c_init)(struct cpuinfo_x86 * c);
 	void		(*c_identify)(struct cpuinfo_x86 * c);
 	unsigned int	(*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
+	int		c_x86_vendor;
 };
 
-extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
+#define cpu_dev_register(cpu_devX) \
+	static struct cpu_dev *__cpu_dev_##cpu_devX __used \
+	__attribute__((__section__(".x86_cpu_dev.init"))) = \
+	&cpu_devX;
 
-struct cpu_vendor_dev {
-	int vendor;
-	struct cpu_dev *cpu_dev;
-};
-
-#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
-	static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
-	__attribute__((__section__(".x86cpuvendor.init"))) = \
-	{ cpu_vendor_id, cpu_dev }
-
-extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
+extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];
 
-extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
 #endif
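The new cpu_dev_register() replaces the explicit vendor table with link-time registration: each driver drops a pointer into the .x86_cpu_dev.init section and the kernel walks the __x86_cpu_dev_start/_end bounds provided by the linker script. The pattern can be demonstrated stand-alone on GCC/ELF, where the linker synthesizes __start_/__stop_ symbols for any section whose name is a valid identifier; all names below are illustrative:

#include <stdio.h>

struct cpu_dev_demo {
	const char *vendor;
};

#define demo_register(dev) \
	static const struct cpu_dev_demo *__dev_##dev \
	__attribute__((used, section("x86_cpu_dev_demo"))) = &dev

static const struct cpu_dev_demo intel_demo = { "Intel" };
static const struct cpu_dev_demo amd_demo = { "AMD" };
demo_register(intel_demo);
demo_register(amd_demo);

extern const struct cpu_dev_demo *__start_x86_cpu_dev_demo[];
extern const struct cpu_dev_demo *__stop_x86_cpu_dev_demo[];

int main(void)
{
	const struct cpu_dev_demo **p;

	for (p = __start_x86_cpu_dev_demo; p < __stop_x86_cpu_dev_demo; p++)
		printf("registered: %s\n", (*p)->vendor);
	return 0;
}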
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 9943b4c87746..8e48c5d4467d 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -256,7 +256,8 @@ static u32 get_cur_val(const cpumask_t *mask)
  * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
  * no meaning should be associated with absolute values of these MSRs.
  */
-static unsigned int get_measured_perf(unsigned int cpu)
+static unsigned int get_measured_perf(struct cpufreq_policy *policy,
+				      unsigned int cpu)
 {
 	union {
 		struct {
@@ -326,7 +327,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 #endif
 
-	retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100;
+	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
 
 	put_cpu();
 	set_cpus_allowed_ptr(current, &saved_mask);
@@ -788,7 +789,11 @@ static int __init acpi_cpufreq_init(void)
 	if (ret)
 		return ret;
 
-	return cpufreq_register_driver(&acpi_cpufreq_driver);
+	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
+	if (ret)
+		free_percpu(acpi_perf_data);
+
+	return ret;
 }
 
 static void __exit acpi_cpufreq_exit(void)
@@ -798,8 +803,6 @@ static void __exit acpi_cpufreq_exit(void)
 	cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
 	free_percpu(acpi_perf_data);
-
-	return;
 }
 
 module_param(acpi_pstate_strict, uint, 0644);
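As the comment in get_measured_perf() stresses, only the IA32_APERF/IA32_MPERF ratio is architecturally defined. The measurement can be reproduced from user space through /dev/cpu/0/msr (msr driver assumed loaded, root required); 0xe7 and 0xe8 are the architectural MPERF/APERF indices:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint64_t rdmsr_fd(int fd, uint32_t index)
{
	uint64_t v = 0;

	pread(fd, &v, 8, index);
	return v;
}

int main(void)
{
	int fd = open("/dev/cpu/0/msr", O_RDONLY);
	uint64_t m0, a0, m1, a1;

	if (fd < 0)
		return 1;
	m0 = rdmsr_fd(fd, 0xe7);	/* IA32_MPERF */
	a0 = rdmsr_fd(fd, 0xe8);	/* IA32_APERF */
	usleep(100000);			/* let some cycles elapse */
	m1 = rdmsr_fd(fd, 0xe7);
	a1 = rdmsr_fd(fd, 0xe8);
	close(fd);
	if (m1 != m0)			/* only the ratio is meaningful */
		printf("running at %.1f%% of max\n",
		       100.0 * (double)(a1 - a0) / (double)(m1 - m0));
	return 0;
}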
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
index e4a4bf870e94..fe613c93b366 100644
--- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
@@ -25,8 +25,8 @@
 #include <linux/cpufreq.h>
 
 #include <asm/msr.h>
-#include <asm/timex.h>
-#include <asm/io.h>
+#include <linux/timex.h>
+#include <linux/io.h>
 
 #define REG_CSCIR	0x22	/* Chip Setup and Control Index Register */
 #define REG_CSCDR	0x23	/* Chip Setup and Control Data Register */
@@ -82,7 +82,7 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
 	u8 clockspeed_reg;    /* Clock Speed Register */
 
 	local_irq_disable();
-	outb_p(0x80,REG_CSCIR);
+	outb_p(0x80, REG_CSCIR);
 	clockspeed_reg = inb_p(REG_CSCDR);
 	local_irq_enable();
 
@@ -98,10 +98,10 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
 	}
 
 	/* 33 MHz is not 32 MHz... */
-	if ((clockspeed_reg & 0xE0)==0xA0)
+	if ((clockspeed_reg & 0xE0) == 0xA0)
 		return 33000;
 
-	return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000);
+	return (1<<((clockspeed_reg & 0xE0) >> 5)) * 1000;
 }
 
 
@@ -117,7 +117,7 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
  *	There is no return value.
  */
 
-static void elanfreq_set_cpu_state (unsigned int state)
+static void elanfreq_set_cpu_state(unsigned int state)
 {
 	struct cpufreq_freqs freqs;
 
@@ -144,20 +144,20 @@ static void elanfreq_set_cpu_state (unsigned int state)
 	 */
 
 	local_irq_disable();
-	outb_p(0x40,REG_CSCIR);		/* Disable hyperspeed mode */
-	outb_p(0x00,REG_CSCDR);
+	outb_p(0x40, REG_CSCIR);	/* Disable hyperspeed mode */
+	outb_p(0x00, REG_CSCDR);
 	local_irq_enable();		/* wait till internal pipelines and */
 	udelay(1000);			/* buffers have cleaned up */
 
 	local_irq_disable();
 
 	/* now, set the CPU clock speed register (0x80) */
-	outb_p(0x80,REG_CSCIR);
-	outb_p(elan_multiplier[state].val80h,REG_CSCDR);
+	outb_p(0x80, REG_CSCIR);
+	outb_p(elan_multiplier[state].val80h, REG_CSCDR);
 
 	/* now, the hyperspeed bit in PMU Force Mode Register (0x40) */
-	outb_p(0x40,REG_CSCIR);
-	outb_p(elan_multiplier[state].val40h,REG_CSCDR);
+	outb_p(0x40, REG_CSCIR);
+	outb_p(elan_multiplier[state].val40h, REG_CSCDR);
 	udelay(10000);
 	local_irq_enable();
 
@@ -173,12 +173,12 @@ static void elanfreq_set_cpu_state (unsigned int state)
  *	for the hardware supported by the driver.
  */
 
-static int elanfreq_verify (struct cpufreq_policy *policy)
+static int elanfreq_verify(struct cpufreq_policy *policy)
 {
 	return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
 }
 
-static int elanfreq_target (struct cpufreq_policy *policy,
+static int elanfreq_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
 {
@@ -205,7 +205,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 
 	/* capability check */
 	if ((c->x86_vendor != X86_VENDOR_AMD) ||
-	    (c->x86 != 4) || (c->x86_model!=10))
+	    (c->x86 != 4) || (c->x86_model != 10))
 		return -ENODEV;
 
 	/* max freq */
@@ -213,7 +213,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 	max_freq = elanfreq_get_cpu_frequency(0);
 
 	/* table init */
-	for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+	for (i = 0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
 		if (elanfreq_table[i].frequency > max_freq)
 			elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 	}
@@ -224,7 +224,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 
 	result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
 	if (result)
-		return (result);
+		return result;
 
 	cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
 	return 0;
@@ -260,7 +260,7 @@ __setup("elanfreq=", elanfreq_setup);
 #endif
 
 
-static struct freq_attr* elanfreq_attr[] = {
+static struct freq_attr *elanfreq_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
 	NULL,
 };
@@ -284,9 +284,9 @@ static int __init elanfreq_init(void)
 
 	/* Test if we have the right hardware */
 	if ((c->x86_vendor != X86_VENDOR_AMD) ||
-		(c->x86 != 4) || (c->x86_model!=10)) {
+	    (c->x86 != 4) || (c->x86_model != 10)) {
 		printk(KERN_INFO "elanfreq: error: no Elan processor found!\n");
 		return -ENODEV;
 	}
 	return cpufreq_register_driver(&elanfreq_driver);
 }
@@ -298,7 +298,7 @@ static void __exit elanfreq_exit(void)
 }
 
 
-module_param (max_freq, int, 0444);
+module_param(max_freq, int, 0444);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, Sven Geggus <sven@geggus.net>");
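The decode in elanfreq_get_cpu_frequency() comes down to bits 7:5 of the CSC clock-speed register, with one quirk called out in the source ("33 MHz is not 32 MHz"). The arithmetic isolated from the port I/O, as a table you can sanity-check:

#include <stdio.h>

static unsigned int elan_decode_khz(unsigned char reg)
{
	if ((reg & 0xE0) == 0xA0)		/* 33 MHz is not 32 MHz... */
		return 33000;
	return (1 << ((reg & 0xE0) >> 5)) * 1000;	/* 1, 2, 4, ... MHz */
}

int main(void)
{
	unsigned int v;

	for (v = 0x00; v <= 0xE0; v += 0x20)
		printf("reg=0x%02x -> %u kHz\n", v, elan_decode_khz(v));
	return 0;
}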
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 06fcce516d51..b0461856acfb 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -1,5 +1,5 @@
 /*
- *  (C) 2001-2004  Dave Jones. <davej@codemonkey.org.uk>
+ *  (C) 2001-2004  Dave Jones. <davej@redhat.com>
  *  (C) 2002  Padraig Brady. <padraig@antefacto.com>
  *
  *  Licensed under the terms of the GNU GPL License version 2.
@@ -1019,7 +1019,7 @@ MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
 module_param(revid_errata, int, 0644);
 MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
 
-MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_AUTHOR ("Dave Jones <davej@redhat.com>");
 MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
 MODULE_LICENSE ("GPL");
 
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
index eb9b62b0830c..c1ac5790c63e 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
@@ -15,12 +15,11 @@
 #include <linux/slab.h>
 
 #include <asm/msr.h>
-#include <asm/timex.h>
-#include <asm/io.h>
+#include <linux/timex.h>
+#include <linux/io.h>
 
-
-#define POWERNOW_IOPORT 0xfff0          /* it doesn't matter where, as long
-                                           as it is unused */
+#define POWERNOW_IOPORT 0xfff0		/* it doesn't matter where, as long
+					   as it is unused */
 
 static unsigned int                     busfreq;   /* FSB, in 10 kHz */
 static unsigned int                     max_multiplier;
@@ -53,7 +52,7 @@ static int powernow_k6_get_cpu_multiplier(void)
 
 	msrval = POWERNOW_IOPORT + 0x1;
 	wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
-	invalue=inl(POWERNOW_IOPORT + 0x8);
+	invalue = inl(POWERNOW_IOPORT + 0x8);
 	msrval = POWERNOW_IOPORT + 0x0;
 	wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
 
@@ -67,9 +66,9 @@ static int powernow_k6_get_cpu_multiplier(void)
  *
  *   Tries to change the PowerNow! multiplier
  */
-static void powernow_k6_set_state (unsigned int best_i)
+static void powernow_k6_set_state(unsigned int best_i)
 {
-	unsigned long outvalue=0, invalue=0;
+	unsigned long outvalue = 0, invalue = 0;
 	unsigned long msrval;
 	struct cpufreq_freqs freqs;
 
@@ -90,10 +89,10 @@ static void powernow_k6_set_state (unsigned int best_i)
 
 	msrval = POWERNOW_IOPORT + 0x1;
 	wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
-	invalue=inl(POWERNOW_IOPORT + 0x8);
+	invalue = inl(POWERNOW_IOPORT + 0x8);
 	invalue = invalue & 0xf;
 	outvalue = outvalue | invalue;
-	outl(outvalue ,(POWERNOW_IOPORT + 0x8));
+	outl(outvalue , (POWERNOW_IOPORT + 0x8));
 	msrval = POWERNOW_IOPORT + 0x0;
 	wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
 
@@ -124,7 +123,7 @@ static int powernow_k6_verify(struct cpufreq_policy *policy)
  *
  * sets a new CPUFreq policy
  */
-static int powernow_k6_target (struct cpufreq_policy *policy,
+static int powernow_k6_target(struct cpufreq_policy *policy,
 			       unsigned int target_freq,
 			       unsigned int relation)
 {
@@ -152,7 +151,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 	busfreq = cpu_khz / max_multiplier;
 
 	/* table init */
-	for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+	for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
 		if (clock_ratio[i].index > max_multiplier)
 			clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
 		else
@@ -165,7 +164,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 
 	result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
 	if (result)
-		return (result);
+		return result;
 
 	cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
 
@@ -176,8 +175,8 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
 {
 	unsigned int i;
-	for (i=0; i<8; i++) {
-		if (i==max_multiplier)
+	for (i = 0; i < 8; i++) {
+		if (i == max_multiplier)
 			powernow_k6_set_state(i);
 	}
 	cpufreq_frequency_table_put_attr(policy->cpu);
@@ -189,7 +188,7 @@ static unsigned int powernow_k6_get(unsigned int cpu)
 	return busfreq * powernow_k6_get_cpu_multiplier();
 }
 
-static struct freq_attr* powernow_k6_attr[] = {
+static struct freq_attr *powernow_k6_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
 	NULL,
 };
@@ -227,7 +226,7 @@ static int __init powernow_k6_init(void)
 	}
 
 	if (cpufreq_register_driver(&powernow_k6_driver)) {
-		release_region (POWERNOW_IOPORT, 16);
+		release_region(POWERNOW_IOPORT, 16);
 		return -EINVAL;
 	}
 
@@ -243,13 +242,13 @@ static int __init powernow_k6_init(void)
 static void __exit powernow_k6_exit(void)
 {
 	cpufreq_unregister_driver(&powernow_k6_driver);
-	release_region (POWERNOW_IOPORT, 16);
+	release_region(POWERNOW_IOPORT, 16);
 }
 
 
-MODULE_AUTHOR ("Arjan van de Ven <arjanv@redhat.com>, Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>");
-MODULE_DESCRIPTION ("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Arjan van de Ven, Dave Jones <davej@redhat.com>, Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
+MODULE_LICENSE("GPL");
 
 module_init(powernow_k6_init);
 module_exit(powernow_k6_exit);
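A worked example of the bus-frequency arithmetic in powernow_k6_cpu_init(): K6 multipliers are handled scaled by ten, so dividing the measured cpu_khz by the boot multiplier gives a bus figure whose product with any table multiplier lands back in kHz. All numbers below are illustrative, not taken from the driver's clock_ratio table:

#include <stdio.h>

int main(void)
{
	unsigned int cpu_khz = 450000;		/* say, a K6-2+ at 450 MHz */
	unsigned int max_multiplier = 45;	/* 4.5x, scaled by ten */
	unsigned int busfreq = cpu_khz / max_multiplier;
	static const unsigned int ratios[] = { 20, 30, 35, 40, 45 };
	unsigned int i;

	for (i = 0; i < sizeof(ratios) / sizeof(ratios[0]); i++)
		printf("%u.%ux -> %u kHz\n", ratios[i] / 10,
		       ratios[i] % 10, busfreq * ratios[i]);
	return 0;
}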
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 0a61159d7b71..7c7d56b43136 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -1,6 +1,6 @@
 /*
  *  AMD K7 Powernow driver.
- *  (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs.
+ *  (C) 2003 Dave Jones on behalf of SuSE Labs.
  *  (C) 2003-2004 Dave Jones <davej@redhat.com>
  *
  *  Licensed under the terms of the GNU GPL License version 2.
@@ -692,7 +692,7 @@ static void __exit powernow_exit (void)
 module_param(acpi_force,  int, 0444);
 MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
 
-MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_AUTHOR ("Dave Jones <davej@redhat.com>");
 MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors.");
 MODULE_LICENSE ("GPL");
 
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 4e0c6abd7ca4..d3dcd58b87cd 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -7,7 +7,7 @@
  *  Support : mark.langsdorf@amd.com
  *
  *  Based on the powernow-k7.c module written by Dave Jones.
- *  (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs
+ *  (C) 2003 Dave Jones on behalf of SuSE Labs
  *  (C) 2004 Dominik Brodowski <linux@brodo.de>
  *  (C) 2004 Pavel Machek <pavel@suse.cz>
  *  Licensed under the terms of the GNU GPL License version 2.
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 191f7263c61d..04d0376b64b0 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -431,7 +431,7 @@ static void __exit speedstep_exit(void)
 }
 
 
-MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>");
+MODULE_AUTHOR ("Dave Jones <davej@redhat.com>, Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors on chipsets with ICH-M southbridges.");
 MODULE_LICENSE ("GPL");
 
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 898a5a2002ed..ffd0f5ed071a 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -121,7 +121,7 @@ static void __cpuinit set_cx86_reorder(void)
 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
 
 	/* Load/Store Serialize to mem access disable (=reorder it) */
-	setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
+	setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
 	/* set load/store serialize from 1GB to 4GB */
 	ccr3 |= 0xe0;
 	setCx86(CX86_CCR3, ccr3);
@@ -132,11 +132,11 @@ static void __cpuinit set_cx86_memwb(void)
 	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
 
 	/* CCR2 bit 2: unlock NW bit */
-	setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
+	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
 	/* set 'Not Write-through' */
 	write_cr0(read_cr0() | X86_CR0_NW);
 	/* CCR2 bit 2: lock NW bit and set WT1 */
-	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
+	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
 }
 
 /*
@@ -150,14 +150,14 @@ static void __cpuinit geode_configure(void)
 	local_irq_save(flags);
 
 	/* Suspend on halt power saving and enable #SUSP pin */
-	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
 
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
 
 
 	/* FPU fast, DTE cache, Mem bypass */
-	setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
+	setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
 	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
 
 	set_cx86_memwb();
@@ -291,7 +291,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 	/* GXm supports extended cpuid levels 'ala' AMD */
 	if (c->cpuid_level == 2) {
 		/* Enable cxMMX extensions (GX1 Datasheet 54) */
-		setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
+		setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
 
 		/*
 		 * GXm : 0x30 ... 0x5f GXm  datasheet 51
@@ -301,7 +301,6 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		 */
 		if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f))
 			geode_configure();
-		get_model_name(c);  /* get CPU marketing name */
 		return;
 	} else { /* MediaGX */
 		Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
@@ -314,7 +313,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		if (dir1 > 7) {
 			dir0_msn++;  /* M II */
 			/* Enable MMX extensions (App note 108) */
-			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
+			setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
 		} else {
 			c->coma_bug = 1;      /* 6x86MX, it has the bug. */
 		}
@@ -429,7 +428,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 		local_irq_save(flags);
 		ccr3 = getCx86(CX86_CCR3);
 		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN  */
-		setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);  /* enable cpuid  */
+		setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);  /* enable cpuid  */
 		setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
 		local_irq_restore(flags);
 	}
@@ -442,14 +441,16 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 	.c_early_init	= early_init_cyrix,
 	.c_init		= init_cyrix,
 	.c_identify	= cyrix_identify,
+	.c_x86_vendor	= X86_VENDOR_CYRIX,
 };

-cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev);
+cpu_dev_register(cyrix_cpu_dev);

 static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
 	.c_vendor	= "NSC",
 	.c_ident	= { "Geode by NSC" },
 	.c_init		= init_nsc,
+	.c_x86_vendor	= X86_VENDOR_NSC,
 };

-cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev);
+cpu_dev_register(nsc_cpu_dev);
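The getCx86()/setCx86() pairs rewritten above (and the new *_old variants) wrap the Cyrix configuration-register protocol: an index byte written to I/O port 0x22 selects a register (CCRn, PCR0, ...), and port 0x23 carries the data. A minimal stand-alone sketch of that access pattern, assuming port-I/O access (illustrative helper names, not from this patch):

	/* Cyrix config register access: index via port 0x22, data via 0x23
	 * (outb/inb as in <asm/io.h>: value first, then port). */
	static unsigned char cyrix_get(unsigned char reg)
	{
		outb(reg, 0x22);	/* select register */
		return inb(0x23);	/* read its contents */
	}

	static void cyrix_set(unsigned char reg, unsigned char data)
	{
		outb(reg, 0x22);	/* select register */
		outb(data, 0x23);	/* write new value */
	}

So a call like setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88) is a read-modify-write of CCR2 through that port pair.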
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c
deleted file mode 100644
index c9017799497c..000000000000
--- a/arch/x86/kernel/cpu/feature_names.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Strings for the various x86 capability flags.
- *
- * This file must not contain any executable code.
- */
-
-#include <asm/cpufeature.h>
-
-/*
- * These flag bits must match the definitions in <asm/cpufeature.h>.
- * NULL means this bit is undefined or reserved; either way it doesn't
- * have meaning as far as Linux is concerned. Note that it's important
- * to realize there is a difference between this table and CPUID -- if
- * applications want to get the raw CPUID data, they should access
- * /dev/cpu/<cpu_nr>/cpuid instead.
- */
-const char * const x86_cap_flags[NCAPINTS*32] = {
-	/* Intel-defined */
-	"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
-	"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
-	"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
-	"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
-
-	/* AMD-defined */
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-	NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
-	NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL,
-	NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
-	"3dnowext", "3dnow",
-
-	/* Transmeta-defined */
-	"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-	/* Other (Linux-defined) */
-	"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
-	NULL, NULL, NULL, NULL,
-	"constant_tsc", "up", NULL, "arch_perfmon",
-	"pebs", "bts", NULL, NULL,
-	"rep_good", NULL, NULL, NULL,
-	"nopl", NULL, NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-	/* Intel-defined (#2) */
-	"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
-	"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
-	NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-	/* VIA/Cyrix/Centaur-defined */
-	NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
-	"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-	/* AMD-defined (#2) */
-	"lahf_lm", "cmp_legacy", "svm", "extapic",
-	"cr8_legacy", "abm", "sse4a", "misalignsse",
-	"3dnowprefetch", "osvw", "ibs", "sse5",
-	"skinit", "wdt", NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-	/* Auxiliary (Linux-defined) */
-	"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-};
-
-const char *const x86_power_flags[32] = {
-	"ts",	/* temperature sensor */
-	"fid",	/* frequency id control */
-	"vid",	/* voltage id control */
-	"ttp",	/* thermal trip */
-	"tm",
-	"stc",
-	"100mhzsteps",
-	"hwpstate",
-	"",	/* tsc invariant mapped to constant_tsc */
-		/* nothing */
-};
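With this hand-maintained table gone, the flag strings are generated at build time from <asm/cpufeature.h> by the new mkcapflags.pl script added further down in this diff. Consumers keep indexing the array by feature-bit number, roughly as in this sketch of the /proc/cpuinfo-style loop (illustrative; c and m stand for the caller's cpuinfo and seq_file):

	int i;

	/* print every known, set feature flag under its table name */
	for (i = 0; i < NCAPINTS * 32; i++)
		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
			seq_printf(m, " %s", x86_cap_flags[i]);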
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b75f2569b8f8..cce0b6118d55 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -15,6 +15,11 @@
 #include <asm/ds.h>
 #include <asm/bugs.h>

+#ifdef CONFIG_X86_64
+#include <asm/topology.h>
+#include <asm/numa_64.h>
+#endif
+
 #include "cpu.h"

 #ifdef CONFIG_X86_LOCAL_APIC
@@ -23,23 +28,22 @@
 #include <mach_apic.h>
 #endif

-#ifdef CONFIG_X86_INTEL_USERCOPY
-/*
- * Alignment at which movsl is preferred for bulk memory copies.
- */
-struct movsl_mask movsl_mask __read_mostly;
-#endif
-
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
-	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
-	if (c->x86 == 15 && c->x86_cache_alignment == 64)
-		c->x86_cache_alignment = 128;
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+
+#ifdef CONFIG_X86_64
+	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
+#else
+	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
+	if (c->x86 == 15 && c->x86_cache_alignment == 64)
+		c->x86_cache_alignment = 128;
+#endif
 }

+#ifdef CONFIG_X86_32
 /*
  * Early probe support logic for ppro memory erratum #50
  *
@@ -59,15 +63,54 @@ int __cpuinit ppro_with_ram_bug(void)
 	return 0;
 }

+#ifdef CONFIG_X86_F00F_BUG
+static void __cpuinit trap_init_f00f_bug(void)
+{
+	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

-/*
- * P4 Xeon errata 037 workaround.
- * Hardware prefetcher may cause stale data to be loaded into the cache.
- */
-static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
+	/*
+	 * Update the IDT descriptor and reload the IDT so that
+	 * it uses the read-only mapped virtual address.
+	 */
+	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
+	load_idt(&idt_descr);
+}
+#endif
+
+static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;

+#ifdef CONFIG_X86_F00F_BUG
+	/*
+	 * All current models of Pentium and Pentium with MMX technology CPUs
+	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
+	 * Note that the workaround only should be initialized once...
+	 */
+	c->f00f_bug = 0;
+	if (!paravirt_enabled() && c->x86 == 5) {
+		static int f00f_workaround_enabled;
+
+		c->f00f_bug = 1;
+		if (!f00f_workaround_enabled) {
+			trap_init_f00f_bug();
+			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
+			f00f_workaround_enabled = 1;
+		}
+	}
+#endif
+
+	/*
+	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
+	 * model 3 mask 3
+	 */
+	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
+		clear_cpu_cap(c, X86_FEATURE_SEP);
+
+	/*
+	 * P4 Xeon errata 037 workaround.
+	 * Hardware prefetcher may cause stale data to be loaded into the cache.
+	 */
 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
 		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
 		if ((lo & (1<<9)) == 0) {
@@ -77,13 +120,68 @@ static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
 			wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
 		}
 	}
+
+	/*
+	 * See if we have a good local APIC by checking for buggy Pentia,
+	 * i.e. all B steppings and the C2 stepping of P54C when using their
+	 * integrated APIC (see 11AP erratum in "Pentium Processor
+	 * Specification Update").
+	 */
+	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
+	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
+		set_cpu_cap(c, X86_FEATURE_11AP);
+
+
+#ifdef CONFIG_X86_INTEL_USERCOPY
+	/*
+	 * Set up the preferred alignment for movsl bulk memory moves
+	 */
+	switch (c->x86) {
+	case 4:		/* 486: untested */
+		break;
+	case 5:		/* Old Pentia: untested */
+		break;
+	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
+		movsl_mask.mask = 7;
+		break;
+	case 15:	/* P4 is OK down to 8-byte alignment */
+		movsl_mask.mask = 7;
+		break;
+	}
+#endif
+
+#ifdef CONFIG_X86_NUMAQ
+	numaq_tsc_disable();
+#endif
 }
+#else
+static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+{
+}
+#endif

+static void __cpuinit srat_detect_node(void)
+{
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+	unsigned node;
+	int cpu = smp_processor_id();
+	int apicid = hard_smp_processor_id();
+
+	/* Don't do the funky fallback heuristics the AMD version employs
+	   for now. */
+	node = apicid_to_node[apicid];
+	if (node == NUMA_NO_NODE || !node_online(node))
+		node = first_node(node_online_map);
+	numa_set_node(cpu, node);
+
+	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
+#endif
+}

 /*
  * find out the number of processor cores on the die
  */
-static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
+static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
 {
 	unsigned int eax, ebx, ecx, edx;

@@ -98,45 +196,51 @@ static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
 		return 1;
 }

-#ifdef CONFIG_X86_F00F_BUG
-static void __cpuinit trap_init_f00f_bug(void)
+static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
 {
-	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
-
-	/*
-	 * Update the IDT descriptor and reload the IDT so that
-	 * it uses the read-only mapped virtual address.
-	 */
-	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-	load_idt(&idt_descr);
+	/* Intel VMX MSR indicated features */
+#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
+#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
+#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
+#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
+#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
+#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020
+
+	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
+
+	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
+	clear_cpu_cap(c, X86_FEATURE_VNMI);
+	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
+	clear_cpu_cap(c, X86_FEATURE_EPT);
+	clear_cpu_cap(c, X86_FEATURE_VPID);
+
+	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
+	msr_ctl = vmx_msr_high | vmx_msr_low;
+	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
+		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
+	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
+		set_cpu_cap(c, X86_FEATURE_VNMI);
+	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
+		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+		      vmx_msr_low, vmx_msr_high);
+		msr_ctl2 = vmx_msr_high | vmx_msr_low;
+		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
+		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
+			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
+		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
+			set_cpu_cap(c, X86_FEATURE_EPT);
+		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
+			set_cpu_cap(c, X86_FEATURE_VPID);
+	}
 }
-#endif

 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
-	char *p = NULL;

 	early_init_intel(c);

-#ifdef CONFIG_X86_F00F_BUG
-	/*
-	 * All current models of Pentium and Pentium with MMX technology CPUs
-	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
-	 * Note that the workaround only should be initialized once...
-	 */
-	c->f00f_bug = 0;
-	if (!paravirt_enabled() && c->x86 == 5) {
-		static int f00f_workaround_enabled;
-
-		c->f00f_bug = 1;
-		if (!f00f_workaround_enabled) {
-			trap_init_f00f_bug();
-			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
-			f00f_workaround_enabled = 1;
-		}
-	}
-#endif
+	intel_workarounds(c);

 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
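detect_vmx_virtcap(), added above, ORs the low and high halves of the VMX capability MSRs and maps the allowed-1 control bits onto synthetic feature flags. The same registers can be peeked from user space through the msr driver; a rough sketch, assuming /dev/cpu/0/msr exists, root privileges, and that 0x482 is IA32_VMX_PROCBASED_CTLS (hypothetical helper, not from this patch):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t val;
		uint32_t ctl;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		if (fd < 0 || pread(fd, &val, 8, 0x482) != 8)
			return 1;	/* no msr driver, or no VMX MSR */
		/* combine the halves exactly as detect_vmx_virtcap() does */
		ctl = (uint32_t)(val >> 32) | (uint32_t)val;
		printf("TPR shadow: %s\n", (ctl & 0x00200000) ? "yes" : "no");
		close(fd);
		return 0;
	}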
@@ -146,16 +250,32 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}

-	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
-	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
-		clear_cpu_cap(c, X86_FEATURE_SEP);
+	if (cpu_has_xmm2)
+		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+	if (cpu_has_ds) {
+		unsigned int l1;
+		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+		if (!(l1 & (1<<11)))
+			set_cpu_cap(c, X86_FEATURE_BTS);
+		if (!(l1 & (1<<12)))
+			set_cpu_cap(c, X86_FEATURE_PEBS);
+		ds_init_intel(c);
+	}

+#ifdef CONFIG_X86_64
+	if (c->x86 == 15)
+		c->x86_cache_alignment = c->x86_clflush_size * 2;
+	if (c->x86 == 6)
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+#else
 	/*
 	 * Names for the Pentium II/Celeron processors
 	 * detectable only by also checking the cache size.
 	 * Dixon is NOT a Celeron.
 	 */
 	if (c->x86 == 6) {
+		char *p = NULL;
+
 		switch (c->x86_model) {
 		case 5:
 			if (c->x86_mask == 0) {
@@ -178,70 +298,41 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
178 p = "Celeron (Coppermine)"; 298 p = "Celeron (Coppermine)";
179 break; 299 break;
180 } 300 }
181 }
182
183 if (p)
184 strcpy(c->x86_model_id, p);
185
186 c->x86_max_cores = num_cpu_cores(c);
187
188 detect_ht(c);
189 301
190 /* Work around errata */ 302 if (p)
191 Intel_errata_workarounds(c); 303 strcpy(c->x86_model_id, p);
192
193#ifdef CONFIG_X86_INTEL_USERCOPY
194 /*
195 * Set up the preferred alignment for movsl bulk memory moves
196 */
197 switch (c->x86) {
198 case 4: /* 486: untested */
199 break;
200 case 5: /* Old Pentia: untested */
201 break;
202 case 6: /* PII/PIII only like movsl with 8-byte alignment */
203 movsl_mask.mask = 7;
204 break;
205 case 15: /* P4 is OK down to 8-byte alignment */
206 movsl_mask.mask = 7;
207 break;
208 } 304 }
209#endif
210 305
211 if (cpu_has_xmm2) 306 if (c->x86 == 15)
212 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
213 if (c->x86 == 15) {
214 set_cpu_cap(c, X86_FEATURE_P4); 307 set_cpu_cap(c, X86_FEATURE_P4);
215 }
216 if (c->x86 == 6) 308 if (c->x86 == 6)
217 set_cpu_cap(c, X86_FEATURE_P3); 309 set_cpu_cap(c, X86_FEATURE_P3);
218 if (cpu_has_ds) {
219 unsigned int l1;
220 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
221 if (!(l1 & (1<<11)))
222 set_cpu_cap(c, X86_FEATURE_BTS);
223 if (!(l1 & (1<<12)))
224 set_cpu_cap(c, X86_FEATURE_PEBS);
225 }
226 310
227 if (cpu_has_bts) 311 if (cpu_has_bts)
228 ds_init_intel(c); 312 ptrace_bts_init_intel(c);
229 313
230 /* 314#endif
231 * See if we have a good local APIC by checking for buggy Pentia,
232 * i.e. all B steppings and the C2 stepping of P54C when using their
233 * integrated APIC (see 11AP erratum in "Pentium Processor
234 * Specification Update").
235 */
236 if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
237 (c->x86_mask < 0x6 || c->x86_mask == 0xb))
238 set_cpu_cap(c, X86_FEATURE_11AP);
239 315
240#ifdef CONFIG_X86_NUMAQ 316 detect_extended_topology(c);
241 numaq_tsc_disable(); 317 if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
318 /*
319 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
320 * detection.
321 */
322 c->x86_max_cores = intel_num_cpu_cores(c);
323#ifdef CONFIG_X86_32
324 detect_ht(c);
242#endif 325#endif
326 }
327
328 /* Work around errata */
329 srat_detect_node();
330
331 if (cpu_has(c, X86_FEATURE_VMX))
332 detect_vmx_virtcap(c);
243} 333}
244 334
335#ifdef CONFIG_X86_32
245static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) 336static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
246{ 337{
247 /* 338 /*
@@ -254,10 +345,12 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
 		size = 256;
 	return size;
 }
+#endif

 static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Intel",
 	.c_ident	= { "GenuineIntel" },
+#ifdef CONFIG_X86_32
 	.c_models = {
 		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
 		  {
@@ -307,76 +400,12 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 		  }
 		},
 	},
+	.c_size_cache	= intel_size_cache,
+#endif
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
-	.c_size_cache	= intel_size_cache,
+	.c_x86_vendor	= X86_VENDOR_INTEL,
 };

-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
-
-#ifndef CONFIG_X86_CMPXCHG
-unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
-{
-	u8 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u8 *)ptr;
-	if (prev == old)
-		*(u8 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u8);
-
-unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
-{
-	u16 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u16 *)ptr;
-	if (prev == old)
-		*(u16 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u16);
-
-unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
-{
-	u32 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u32 *)ptr;
-	if (prev == old)
-		*(u32 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u32);
-#endif
-
-#ifndef CONFIG_X86_CMPXCHG64
-unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
-{
-	u64 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u64 *)ptr;
-	if (prev == old)
-		*(u64 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_486_u64);
-#endif
-
-/* arch_initcall(intel_cpu_init); */
+cpu_dev_register(intel_cpu_dev);

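The cmpxchg_386_*/cmpxchg_486_u64 fallbacks deleted above all share one idea: on a CPU without the instruction, fake atomicity by masking interrupts, which is only sound on uniprocessor machines (the functionality moves out of this vendor-specific file rather than being dropped). Distilled to a sketch, with the operand width elided:

	/* Poor man's cmpxchg: UP-only; protects against interrupts,
	 * never against other CPUs. */
	static unsigned long cmpxchg_emulated(volatile unsigned long *ptr,
					      unsigned long old, unsigned long new)
	{
		unsigned long flags, prev;

		local_irq_save(flags);	/* no interrupt may observe the window */
		prev = *ptr;
		if (prev == old)
			*ptr = new;	/* store only if nothing changed it */
		local_irq_restore(flags);
		return prev;		/* caller checks prev == old for success */
	}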
diff --git a/arch/x86/kernel/cpu/intel_64.c b/arch/x86/kernel/cpu/intel_64.c
deleted file mode 100644
index 1019c58d39f0..000000000000
--- a/arch/x86/kernel/cpu/intel_64.c
+++ /dev/null
@@ -1,95 +0,0 @@
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/topology.h>
-#include <asm/numa_64.h>
-
-#include "cpu.h"
-
-static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
-{
-	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-
-	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-}
-
-/*
- * find out the number of processor cores on the die
- */
-static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
-{
-	unsigned int eax, t;
-
-	if (c->cpuid_level < 4)
-		return 1;
-
-	cpuid_count(4, 0, &eax, &t, &t, &t);
-
-	if (eax & 0x1f)
-		return ((eax >> 26) + 1);
-	else
-		return 1;
-}
-
-static void __cpuinit srat_detect_node(void)
-{
-#ifdef CONFIG_NUMA
-	unsigned node;
-	int cpu = smp_processor_id();
-	int apicid = hard_smp_processor_id();
-
-	/* Don't do the funky fallback heuristics the AMD version employs
-	   for now. */
-	node = apicid_to_node[apicid];
-	if (node == NUMA_NO_NODE || !node_online(node))
-		node = first_node(node_online_map);
-	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-#endif
-}
-
-static void __cpuinit init_intel(struct cpuinfo_x86 *c)
-{
-	init_intel_cacheinfo(c);
-	if (c->cpuid_level > 9) {
-		unsigned eax = cpuid_eax(10);
-		/* Check for version and the number of counters */
-		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
-	}
-
-	if (cpu_has_ds) {
-		unsigned int l1, l2;
-		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
-		if (!(l1 & (1<<11)))
-			set_cpu_cap(c, X86_FEATURE_BTS);
-		if (!(l1 & (1<<12)))
-			set_cpu_cap(c, X86_FEATURE_PEBS);
-	}
-
-
-	if (cpu_has_bts)
-		ds_init_intel(c);
-
-	if (c->x86 == 15)
-		c->x86_cache_alignment = c->x86_clflush_size * 2;
-	if (c->x86 == 6)
-		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-	c->x86_max_cores = intel_num_cpu_cores(c);
-
-	srat_detect_node();
-}
-
-static struct cpu_dev intel_cpu_dev __cpuinitdata = {
-	.c_vendor	= "Intel",
-	.c_ident	= { "GenuineIntel" },
-	.c_early_init	= early_init_intel,
-	.c_init		= init_intel,
-};
-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
-
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6b0a10b002f1..3f46afbb1cf1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -1,8 +1,8 @@
 /*
  * Routines to indentify caches on Intel CPU.
  *
  * Changes:
  * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
  * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
  * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
  */
@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
+#include <linux/pci.h>

 #include <asm/processor.h>
 #include <asm/smp.h>
@@ -130,9 +131,18 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
+	unsigned long can_disable;
 	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
 };

+#ifdef CONFIG_PCI
+static struct pci_device_id k8_nb_id[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
+	{}
+};
+#endif
+
 unsigned short			num_cache_leaves;

 /* AMD doesn't have CPUID4. Emulate it here to report the same
138/* AMD doesn't have CPUID4. Emulate it here to report the same 148/* AMD doesn't have CPUID4. Emulate it here to report the same
@@ -182,9 +192,10 @@ static unsigned short assocs[] __cpuinitdata = {
182static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; 192static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
183static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; 193static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
184 194
185static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, 195static void __cpuinit
186 union _cpuid4_leaf_ebx *ebx, 196amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
187 union _cpuid4_leaf_ecx *ecx) 197 union _cpuid4_leaf_ebx *ebx,
198 union _cpuid4_leaf_ecx *ecx)
188{ 199{
189 unsigned dummy; 200 unsigned dummy;
190 unsigned line_size, lines_per_tag, assoc, size_in_kb; 201 unsigned line_size, lines_per_tag, assoc, size_in_kb;
@@ -251,27 +262,40 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		(ebx->split.ways_of_associativity + 1) - 1;
 }

-static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+static void __cpuinit
+amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+{
+	if (index < 3)
+		return;
+	this_leaf->can_disable = 1;
+}
+
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 {
 	union _cpuid4_leaf_eax	eax;
 	union _cpuid4_leaf_ebx	ebx;
 	union _cpuid4_leaf_ecx	ecx;
 	unsigned		edx;

-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
 		amd_cpuid4(index, &eax, &ebx, &ecx);
-	else
-		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+		if (boot_cpu_data.x86 >= 0x10)
+			amd_check_l3_disable(index, this_leaf);
+	} else {
+		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+	}
+
 	if (eax.split.type == CACHE_TYPE_NULL)
 		return -EIO; /* better error ? */

 	this_leaf->eax = eax;
 	this_leaf->ebx = ebx;
 	this_leaf->ecx = ecx;
 	this_leaf->size = (ecx.split.number_of_sets + 1) *
 		(ebx.split.coherency_line_size + 1) *
 		(ebx.split.physical_line_partition + 1) *
 		(ebx.split.ways_of_associativity + 1);
 	return 0;
 }

@@ -453,7 +477,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)

 /* pointer to _cpuid4_info array (for each cache leaf) */
 static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))

 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -490,7 +514,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)

 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
 		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
 	}
 }
@@ -572,7 +596,7 @@ struct _index_kobject {

 /* pointer to array of kobjects for cpuX/cache/indexY */
 static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
 #define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))

 #define show_one_plus(file_name, object, val)				\
 static ssize_t show_##file_name						\
@@ -637,6 +661,99 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
 	}
 }

+#define to_object(k)	container_of(k, struct _index_kobject, kobj)
+#define to_attr(a)	container_of(a, struct _cache_attr, attr)
+
+#ifdef CONFIG_PCI
+static struct pci_dev *get_k8_northbridge(int node)
+{
+	struct pci_dev *dev = NULL;
+	int i;
+
+	for (i = 0; i <= node; i++) {
+		do {
+			dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
+			if (!dev)
+				break;
+		} while (!pci_match_id(&k8_nb_id[0], dev));
+		if (!dev)
+			break;
+	}
+	return dev;
+}
+#else
+static struct pci_dev *get_k8_northbridge(int node)
+{
+	return NULL;
+}
+#endif
+
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
+{
+	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	struct pci_dev *dev = NULL;
+	ssize_t ret = 0;
+	int i;
+
+	if (!this_leaf->can_disable)
+		return sprintf(buf, "Feature not enabled\n");
+
+	dev = get_k8_northbridge(node);
+	if (!dev) {
+		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < 2; i++) {
+		unsigned int reg;
+
+		pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
+
+		ret += sprintf(buf, "%sEntry: %d\n", buf, i);
+		ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
+			buf,
+			reg & 0x80000000 ? "Disabled" : "Allowed",
+			reg & 0x40000000 ? "Disabled" : "Allowed");
+		ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
+			buf, (reg & 0x30000) >> 16, reg & 0xfff);
+	}
+	return ret;
+}
+
+static ssize_t
+store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
+		    size_t count)
+{
+	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	struct pci_dev *dev = NULL;
+	unsigned int ret, index, val;
+
+	if (!this_leaf->can_disable)
+		return 0;
+
+	if (strlen(buf) > 15)
+		return -EINVAL;
+
+	ret = sscanf(buf, "%x %x", &index, &val);
+	if (ret != 2)
+		return -EINVAL;
+	if (index > 1)
+		return -EINVAL;
+
+	val |= 0xc0000000;
+	dev = get_k8_northbridge(node);
+	if (!dev) {
+		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
+		return -EINVAL;
+	}
+
+	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
+	wbinvd();
+	pci_write_config_dword(dev, 0x1BC + index * 4, val);
+
+	return 1;
+}
+
 struct _cache_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct _cpuid4_info *, char *);
@@ -657,6 +774,8 @@ define_one_ro(size);
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);

+static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
+
 static struct attribute * default_attrs[] = {
 	&type.attr,
 	&level.attr,
@@ -667,12 +786,10 @@ static struct attribute * default_attrs[] = {
 	&size.attr,
 	&shared_cpu_map.attr,
 	&shared_cpu_list.attr,
+	&cache_disable.attr,
 	NULL
 };

-#define to_object(k) container_of(k, struct _index_kobject, kobj)
-#define to_attr(a) container_of(a, struct _cache_attr, attr)
-
 static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
 {
 	struct _cache_attr *fattr = to_attr(attr);
@@ -682,14 +799,22 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
 	ret = fattr->show ?
 		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
 			buf) :
 		0;
 	return ret;
 }

 static ssize_t store(struct kobject * kobj, struct attribute * attr,
 		     const char * buf, size_t count)
 {
-	return 0;
+	struct _cache_attr *fattr = to_attr(attr);
+	struct _index_kobject *this_leaf = to_object(kobj);
+	ssize_t ret;
+
+	ret = fattr->store ?
+		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
+			buf, count) :
+		0;
+	return ret;
 }

 static struct sysfs_ops sysfs_ops = {
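The show()/store() pair above now dispatches to the new cache_disable attribute, so the L3 index-disable machinery is driven entirely through sysfs reads and writes. A plausible user-space exercise of the interface (path and value are illustrative; index3 is typically the shared L3 on these family 0x10 parts):

	#include <stdio.h>

	int main(void)
	{
		/* store_cache_disable() parses the write as "%x %x":
		 * disable-entry 0, register value 0x200 (example only) */
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/cache/index3/cache_disable", "w");

		if (!f)
			return 1;
		fprintf(f, "0 200\n");
		fclose(f);
		return 0;
	}

Reading the same file back goes through show_cache_disable() and dumps both disable entries of the northbridge's L3 control registers.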
diff --git a/arch/x86/kernel/cpu/mcheck/k7.c b/arch/x86/kernel/cpu/mcheck/k7.c
index f390c9f66351..dd3af6e7b39a 100644
--- a/arch/x86/kernel/cpu/mcheck/k7.c
+++ b/arch/x86/kernel/cpu/mcheck/k7.c
@@ -1,6 +1,6 @@
 /*
- * Athlon/Hammer specific Machine Check Exception Reporting
- * (C) Copyright 2002 Dave Jones <davej@codemonkey.org.uk>
+ * Athlon specific Machine Check Exception Reporting
+ * (C) Copyright 2002 Dave Jones <davej@redhat.com>
  */

 #include <linux/init.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index 774d87cfd8cd..0ebf3fc6a610 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
@@ -1,6 +1,6 @@
 /*
  * mce.c - x86 Machine Check Exception Reporting
- * (c) 2002 Alan Cox <alan@redhat.com>, Dave Jones <davej@codemonkey.org.uk>
+ * (c) 2002 Alan Cox <alan@redhat.com>, Dave Jones <davej@redhat.com>
  */

 #include <linux/init.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 726a5fcdf341..4b031a4ac856 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -860,7 +860,7 @@ error:
 	return err;
 }

-static void mce_remove_device(unsigned int cpu)
+static __cpuinit void mce_remove_device(unsigned int cpu)
 {
 	int i;

diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c
index cc1fccdd31e0..a74af128efc9 100644
--- a/arch/x86/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c
@@ -1,7 +1,7 @@
 /*
  * Non Fatal Machine Check Exception Reporting
  *
- * (C) Copyright 2002 Dave Jones. <davej@codemonkey.org.uk>
+ * (C) Copyright 2002 Dave Jones. <davej@redhat.com>
  *
  * This file contains routines to check for non-fatal MCEs every 15s
  *
diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl
new file mode 100644
index 000000000000..dfea390e1608
--- /dev/null
+++ b/arch/x86/kernel/cpu/mkcapflags.pl
@@ -0,0 +1,32 @@
+#!/usr/bin/perl
+#
+# Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
+#
+
+($in, $out) = @ARGV;
+
+open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n";
+open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
+
+print OUT "#include <asm/cpufeature.h>\n\n";
+print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
+
+while (defined($line = <IN>)) {
+	if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
+		$macro = $1;
+		$feature = $2;
+		$tail = $3;
+		if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
+			$feature = $1;
+		}
+
+		if ($feature ne '') {
+			printf OUT "\t%-32s = \"%s\",\n",
+				"[$macro]", "\L$feature";
+		}
+	}
+}
+print OUT "};\n";
+
+close(IN);
+close(OUT);
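Given a cpufeature.h line such as `#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */`, the script emits a designated initializer keyed by the macro, lower-casing the name (or taking a quoted override from the comment, which is how NULL-like reserved bits stay unnamed). The generated file looks roughly like this sketch:

	#include <asm/cpufeature.h>

	const char * const x86_cap_flags[NCAPINTS*32] = {
		[X86_FEATURE_FPU]	= "fpu",
		[X86_FEATURE_VME]	= "vme",
		/* ... one entry per X86_FEATURE_* definition ... */
	};

which replaces the hand-maintained table deleted from feature_names.c earlier in this diff.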
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index cb7d3b6a80eb..4e8d77f01eeb 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -401,12 +401,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 			tmp |= ~((1<<(hi - 1)) - 1);

 			if (tmp != mask_lo) {
-				static int once = 1;
-
-				if (once) {
-					printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
-					once = 0;
-				}
+				WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
 				mask_lo = tmp;
 			}
 		}
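WARN_ONCE() folds the hand-rolled `static int once` latch into a single macro that also emits a stack dump, which is why the open-coded block above shrinks to one line. A simplified sketch of the effect at this call site (not the macro's exact definition):

	/* Roughly what WARN_ONCE(1, "...") does here, per call site: */
	static void warn_bios_mask_once(void)
	{
		static int warned;	/* the real macro keeps one flag per call site */

		if (!warned) {
			warned = 1;
			printk(KERN_WARNING "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
			dump_stack();	/* WARN/WARN_ONCE also print a backtrace */
		}
	}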
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 84c480bb3715..4c4214690dd1 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -405,9 +405,9 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
 			}
 			/* RED-PEN: base can be > 32bit */
 			len += seq_printf(seq,
-				   "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n",
+				   "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n",
 				   i, base, base >> (20 - PAGE_SHIFT), size, factor,
-				   mtrr_attrib_to_str(type), mtrr_usage_table[i]);
+				   mtrr_usage_table[i], mtrr_attrib_to_str(type));
 		}
 	}
 	return 0;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 885c8265e6b5..c78c04821ea1 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -729,7 +729,7 @@ struct var_mtrr_range_state {
729 mtrr_type type; 729 mtrr_type type;
730}; 730};
731 731
732struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; 732static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
733static int __initdata debug_print; 733static int __initdata debug_print;
734 734
735static int __init 735static int __init
@@ -759,7 +759,8 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
 	/* take out UC ranges */
 	for (i = 0; i < num_var_ranges; i++) {
 		type = range_state[i].type;
-		if (type != MTRR_TYPE_UNCACHABLE)
+		if (type != MTRR_TYPE_UNCACHABLE &&
+		    type != MTRR_TYPE_WRPROT)
 			continue;
 		size = range_state[i].size_pfn;
 		if (!size)
@@ -836,6 +837,13 @@ static int __init enable_mtrr_cleanup_setup(char *str)
 }
 early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);

+static int __init mtrr_cleanup_debug_setup(char *str)
+{
+	debug_print = 1;
+	return 0;
+}
+early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
+
 struct var_mtrr_state {
 	unsigned long	range_startk;
 	unsigned long	range_sizek;
@@ -898,6 +906,27 @@ set_var_mtrr_all(unsigned int address_bits)
 	}
 }

+static unsigned long to_size_factor(unsigned long sizek, char *factorp)
+{
+	char factor;
+	unsigned long base = sizek;
+
+	if (base & ((1<<10) - 1)) {
+		/* not MB alignment */
+		factor = 'K';
+	} else if (base & ((1<<20) - 1)){
+		factor = 'M';
+		base >>= 10;
+	} else {
+		factor = 'G';
+		base >>= 20;
+	}
+
+	*factorp = factor;
+
+	return base;
+}
+
 static unsigned int __init
 range_to_mtrr(unsigned int reg, unsigned long range_startk,
 	      unsigned long range_sizek, unsigned char type)
@@ -919,13 +948,21 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
 			align = max_align;

 		sizek = 1 << align;
-		if (debug_print)
+		if (debug_print) {
+			char start_factor = 'K', size_factor = 'K';
+			unsigned long start_base, size_base;
+
+			start_base = to_size_factor(range_startk, &start_factor),
+			size_base = to_size_factor(sizek, &size_factor),
+
 			printk(KERN_DEBUG "Setting variable MTRR %d, "
-				"base: %ldMB, range: %ldMB, type %s\n",
-				reg, range_startk >> 10, sizek >> 10,
+				"base: %ld%cB, range: %ld%cB, type %s\n",
+				reg, start_base, start_factor,
+				size_base, size_factor,
 				(type == MTRR_TYPE_UNCACHABLE)?"UC":
 				((type == MTRR_TYPE_WRBACK)?"WB":"Other")
 				);
+		}
 		save_var_mtrr(reg++, range_startk, sizek, type);
 		range_startk += sizek;
 		range_sizek -= sizek;
@@ -970,6 +1007,8 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
 	/* try to append some small hole */
 	range0_basek = state->range_startk;
 	range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
+
+	/* no increase */
 	if (range0_sizek == state->range_sizek) {
 		if (debug_print)
 			printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
@@ -980,13 +1019,40 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
 		return 0;
 	}

-	range0_sizek -= chunk_sizek;
-	if (range0_sizek && sizek) {
+	/* only cut back, when it is not the last */
+	if (sizek) {
 		while (range0_basek + range0_sizek > (basek + sizek)) {
-			range0_sizek -= chunk_sizek;
-			if (!range0_sizek)
-				break;
-		}
+			if (range0_sizek >= chunk_sizek)
+				range0_sizek -= chunk_sizek;
+			else
+				range0_sizek = 0;
+
+			if (!range0_sizek)
+				break;
+		}
+	}
+
+second_try:
+	range_basek = range0_basek + range0_sizek;
+
+	/* one hole in the middle */
+	if (range_basek > basek && range_basek <= (basek + sizek))
+		second_sizek = range_basek - basek;
+
+	if (range0_sizek > state->range_sizek) {
+
+		/* one hole in middle or at end */
+		hole_sizek = range0_sizek - state->range_sizek - second_sizek;
+
+		/* hole size should be less than half of range0 size */
+		if (hole_sizek >= (range0_sizek >> 1) &&
+		    range0_sizek >= chunk_sizek) {
+			range0_sizek -= chunk_sizek;
+			second_sizek = 0;
+			hole_sizek = 0;
+
+			goto second_try;
+		}
 	}

 	if (range0_sizek) {
@@ -996,50 +1062,28 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
 				(range0_basek + range0_sizek)<<10);
 		state->reg = range_to_mtrr(state->reg, range0_basek,
 				range0_sizek, MTRR_TYPE_WRBACK);
-
-	}
-
-	range_basek = range0_basek + range0_sizek;
-	range_sizek = chunk_sizek;
-
-	if (range_basek + range_sizek > basek &&
-	    range_basek + range_sizek <= (basek + sizek)) {
-		/* one hole */
-		second_basek = basek;
-		second_sizek = range_basek + range_sizek - basek;
 	}

-	/* if last piece, only could one hole near end */
-	if ((second_basek || !basek) &&
-	    range_sizek - (state->range_sizek - range0_sizek) - second_sizek <
-	    (chunk_sizek >> 1)) {
-		/*
-		 * one hole in middle (second_sizek is 0) or at end
-		 * (second_sizek is 0 )
-		 */
-		hole_sizek = range_sizek - (state->range_sizek - range0_sizek)
-			- second_sizek;
-		hole_basek = range_basek + range_sizek - hole_sizek
-			- second_sizek;
-	} else {
-		/* fallback for big hole, or several holes */
+	if (range0_sizek < state->range_sizek) {
+		/* need to handle left over */
 		range_sizek = state->range_sizek - range0_sizek;
-		second_basek = 0;
-		second_sizek = 0;
+
+		if (debug_print)
+			printk(KERN_DEBUG "range: %016lx - %016lx\n",
+				range_basek<<10,
+				(range_basek + range_sizek)<<10);
+		state->reg = range_to_mtrr(state->reg, range_basek,
+				range_sizek, MTRR_TYPE_WRBACK);
 	}

-	if (debug_print)
-		printk(KERN_DEBUG "range: %016lx - %016lx\n", range_basek<<10,
-			(range_basek + range_sizek)<<10);
-	state->reg = range_to_mtrr(state->reg, range_basek, range_sizek,
-			MTRR_TYPE_WRBACK);
 	if (hole_sizek) {
+		hole_basek = range_basek - hole_sizek - second_sizek;
 		if (debug_print)
 			printk(KERN_DEBUG "hole: %016lx - %016lx\n",
-				hole_basek<<10, (hole_basek + hole_sizek)<<10);
-		state->reg = range_to_mtrr(state->reg, hole_basek, hole_sizek,
-				MTRR_TYPE_UNCACHABLE);
-
+				hole_basek<<10,
+				(hole_basek + hole_sizek)<<10);
+		state->reg = range_to_mtrr(state->reg, hole_basek,
+				hole_sizek, MTRR_TYPE_UNCACHABLE);
 	}

 	return second_sizek;
@@ -1154,11 +1198,11 @@ struct mtrr_cleanup_result {
 };

 /*
- * gran_size: 1M, 2M, ..., 2G
- * chunk size: gran_size, ..., 4G
- * so we need (2+13)*6
+ * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G
+ * chunk size: gran_size, ..., 2G
+ * so we need (1+16)*8
  */
-#define NUM_RESULT	90
+#define NUM_RESULT	136
 #define PSHIFT		(PAGE_SHIFT - 10)

 static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
@@ -1168,13 +1212,14 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM];
 static int __init mtrr_cleanup(unsigned address_bits)
 {
 	unsigned long extra_remove_base, extra_remove_size;
-	unsigned long i, base, size, def, dummy;
+	unsigned long base, size, def, dummy;
 	mtrr_type type;
 	int nr_range, nr_range_new;
 	u64 chunk_size, gran_size;
 	unsigned long range_sums, range_sums_new;
 	int index_good;
 	int num_reg_good;
+	int i;

 	/* extra one for all 0 */
 	int num[MTRR_NUM_TYPES + 1];
@@ -1204,6 +1249,8 @@ static int __init mtrr_cleanup(unsigned address_bits)
 			continue;
 		if (!size)
 			type = MTRR_NUM_TYPES;
+		if (type == MTRR_TYPE_WRPROT)
+			type = MTRR_TYPE_UNCACHABLE;
 		num[type]++;
 	}

@@ -1216,23 +1263,57 @@ static int __init mtrr_cleanup(unsigned address_bits)
 			num_var_ranges - num[MTRR_NUM_TYPES])
 		return 0;

+	/* print original var MTRRs at first, for debugging: */
+	printk(KERN_DEBUG "original variable MTRRs\n");
+	for (i = 0; i < num_var_ranges; i++) {
+		char start_factor = 'K', size_factor = 'K';
+		unsigned long start_base, size_base;
+
+		size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
+		if (!size_base)
+			continue;
+
+		size_base = to_size_factor(size_base, &size_factor),
+		start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
+		start_base = to_size_factor(start_base, &start_factor),
+		type = range_state[i].type;
+
+		printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
+			i, start_base, start_factor,
+			size_base, size_factor,
+			(type == MTRR_TYPE_UNCACHABLE) ? "UC" :
+			    ((type == MTRR_TYPE_WRPROT) ? "WP" :
+			     ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
+			);
+	}
+
 	memset(range, 0, sizeof(range));
 	extra_remove_size = 0;
-	if (mtrr_tom2) {
-		extra_remove_base = 1 << (32 - PAGE_SHIFT);
+	extra_remove_base = 1 << (32 - PAGE_SHIFT);
+	if (mtrr_tom2)
 		extra_remove_size =
 			(mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
-	}
 	nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
 					  extra_remove_size);
+	/*
+	 * [0, 1M) should always be coverred by var mtrr with WB
+	 * and fixed mtrrs should take effective before var mtrr for it
+	 */
+	nr_range = add_range_with_merge(range, nr_range, 0,
+					(1ULL<<(20 - PAGE_SHIFT)) - 1);
+	/* sort the ranges */
+	sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
+
 	range_sums = sum_ranges(range, nr_range);
 	printk(KERN_INFO "total RAM coverred: %ldM\n",
 	       range_sums >> (20 - PAGE_SHIFT));

 	if (mtrr_chunk_size && mtrr_gran_size) {
 		int num_reg;
+		char gran_factor, chunk_factor, lose_factor;
+		unsigned long gran_base, chunk_base, lose_base;

-		debug_print = 1;
+		debug_print++;
 		/* convert ranges to var ranges state */
 		num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
 					      mtrr_gran_size);
@@ -1256,34 +1337,48 @@ static int __init mtrr_cleanup(unsigned address_bits)
 		result[i].lose_cover_sizek =
 			(range_sums - range_sums_new) << PSHIFT;

-		printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t",
-			result[i].bad?"*BAD*":" ", result[i].gran_sizek >> 10,
-			result[i].chunk_sizek >> 10);
-		printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ldM \n",
+		gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
+		chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
+		lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
+		printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
+			result[i].bad?"*BAD*":" ",
+			gran_base, gran_factor, chunk_base, chunk_factor);
+		printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
 			result[i].num_reg, result[i].bad?"-":"",
-			result[i].lose_cover_sizek >> 10);
+			lose_base, lose_factor);
 		if (!result[i].bad) {
 			set_var_mtrr_all(address_bits);
 			return 1;
 		}
 		printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
 		       "will find optimal one\n");
-		debug_print = 0;
+		debug_print--;
 		memset(result, 0, sizeof(result[0]));
 	}

 	i = 0;
 	memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
 	memset(result, 0, sizeof(result));
-	for (gran_size = (1ULL<<20); gran_size < (1ULL<<32); gran_size <<= 1) {
-		for (chunk_size = gran_size; chunk_size < (1ULL<<33);
+	for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
+		char gran_factor;
+		unsigned long gran_base;
+
+		if (debug_print)
+			gran_base = to_size_factor(gran_size >> 10, &gran_factor);
+
+		for (chunk_size = gran_size; chunk_size < (1ULL<<32);
 		     chunk_size <<= 1) {
 			int num_reg;

-			if (debug_print)
-				printk(KERN_INFO
-			"\ngran_size: %lldM chunk_size_size: %lldM\n",
-				gran_size >> 20, chunk_size >> 20);
+			if (debug_print) {
+				char chunk_factor;
+				unsigned long chunk_base;
+
+				chunk_base = to_size_factor(chunk_size>>10, &chunk_factor),
+				printk(KERN_INFO "\n");
+				printk(KERN_INFO "gran_size: %ld%c chunk_size: %ld%c \n",
+				       gran_base, gran_factor, chunk_base, chunk_factor);
+			}
 			if (i >= NUM_RESULT)
 				continue;

@@ -1326,12 +1421,18 @@ static int __init mtrr_cleanup(unsigned address_bits)

 	/* print out all */
 	for (i = 0; i < NUM_RESULT; i++) {
-		printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t",
-			result[i].bad?"*BAD* ":" ", result[i].gran_sizek >> 10,
-			result[i].chunk_sizek >> 10);
-		printk(KERN_CONT "num_reg: %d \tlose RAM: %s%ldM\n",
-			result[i].num_reg, result[i].bad?"-":"",
-			result[i].lose_cover_sizek >> 10);
+		char gran_factor, chunk_factor, lose_factor;
+		unsigned long gran_base, chunk_base, lose_base;
+
+		gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
+		chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
+		lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
+		printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
+			result[i].bad?"*BAD*":" ",
+			gran_base, gran_factor, chunk_base, chunk_factor);
+		printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
+			result[i].num_reg, result[i].bad?"-":"",
+			lose_base, lose_factor);
 	}

1337 /* try to find the optimal index */ 1438 /* try to find the optimal index */
@@ -1339,10 +1440,8 @@ static int __init mtrr_cleanup(unsigned address_bits)
1339 nr_mtrr_spare_reg = num_var_ranges - 1; 1440 nr_mtrr_spare_reg = num_var_ranges - 1;
1340 num_reg_good = -1; 1441 num_reg_good = -1;
1341 for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { 1442 for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
1342 if (!min_loss_pfn[i]) { 1443 if (!min_loss_pfn[i])
1343 num_reg_good = i; 1444 num_reg_good = i;
1344 break;
1345 }
1346 } 1445 }
1347 1446
1348 index_good = -1; 1447 index_good = -1;
@@ -1358,21 +1457,26 @@ static int __init mtrr_cleanup(unsigned address_bits)
1358 } 1457 }
1359 1458
1360 if (index_good != -1) { 1459 if (index_good != -1) {
1460 char gran_factor, chunk_factor, lose_factor;
1461 unsigned long gran_base, chunk_base, lose_base;
1462
1361 printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); 1463 printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
1362 i = index_good; 1464 i = index_good;
1363 printk(KERN_INFO "gran_size: %ldM \tchunk_size: %ldM \t", 1465 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1364 result[i].gran_sizek >> 10, 1466 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1365 result[i].chunk_sizek >> 10); 1467 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1366 printk(KERN_CONT "num_reg: %d \tlose RAM: %ldM\n", 1468 printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t",
1367 result[i].num_reg, 1469 gran_base, gran_factor, chunk_base, chunk_factor);
1368 result[i].lose_cover_sizek >> 10); 1470 printk(KERN_CONT "num_reg: %d \tlose RAM: %ld%c\n",
1471 result[i].num_reg, lose_base, lose_factor);
1369 /* convert ranges to var ranges state */ 1472 /* convert ranges to var ranges state */
1370 chunk_size = result[i].chunk_sizek; 1473 chunk_size = result[i].chunk_sizek;
1371 chunk_size <<= 10; 1474 chunk_size <<= 10;
1372 gran_size = result[i].gran_sizek; 1475 gran_size = result[i].gran_sizek;
1373 gran_size <<= 10; 1476 gran_size <<= 10;
1374 debug_print = 1; 1477 debug_print++;
1375 x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); 1478 x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
1479 debug_print--;
1376 set_var_mtrr_all(address_bits); 1480 set_var_mtrr_all(address_bits);
1377 return 1; 1481 return 1;
1378 } 1482 }
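
The cleaned-up printouts above rely on a to_size_factor() helper that turns a kilobyte count into a value plus a K/M/G suffix character for the "%ld%c" formats. Its definition is not part of this hunk; the stand-alone sketch below only infers the behavior from the call sites (to_size_factor(sizek, &factor)), so treat it as an illustration rather than the kernel's implementation.

#include <stdio.h>

static unsigned long to_size_factor(unsigned long sizek, char *factor)
{
	unsigned long base = sizek;

	if (base & ((1 << 10) - 1)) {
		/* not a multiple of 1M: print the size in K */
		*factor = 'K';
	} else if (base & ((1 << 20) - 1)) {
		/* a multiple of 1M but not of 1G: print it in M */
		*factor = 'M';
		base >>= 10;
	} else {
		/* a multiple of 1G: print it in G */
		*factor = 'G';
		base >>= 20;
	}
	return base;
}

int main(void)
{
	unsigned long sizes_k[] = { 64, 2048, 1048576 };

	for (int i = 0; i < 3; i++) {
		char factor;
		unsigned long base = to_size_factor(sizes_k[i], &factor);

		printf("%lu%c\n", base, factor);	/* 64K, 2M, 1G */
	}
	return 0;
}
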
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 05cc22dbd4ff..9abd48b22674 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -17,6 +17,8 @@
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/nmi.h> 19#include <linux/nmi.h>
20#include <linux/kprobes.h>
21
20#include <asm/apic.h> 22#include <asm/apic.h>
21#include <asm/intel_arch_perfmon.h> 23#include <asm/intel_arch_perfmon.h>
22 24
@@ -295,13 +297,19 @@ static int setup_k7_watchdog(unsigned nmi_hz)
295 /* setup the timer */ 297 /* setup the timer */
296 wrmsr(evntsel_msr, evntsel, 0); 298 wrmsr(evntsel_msr, evntsel, 0);
297 write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); 299 write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz);
298 apic_write(APIC_LVTPC, APIC_DM_NMI);
299 evntsel |= K7_EVNTSEL_ENABLE;
300 wrmsr(evntsel_msr, evntsel, 0);
301 300
301 /* initialize the wd struct before enabling */
302 wd->perfctr_msr = perfctr_msr; 302 wd->perfctr_msr = perfctr_msr;
303 wd->evntsel_msr = evntsel_msr; 303 wd->evntsel_msr = evntsel_msr;
304 wd->cccr_msr = 0; /* unused */ 304 wd->cccr_msr = 0; /* unused */
305
306 /* ok, everything is initialized, announce that we're set */
307 cpu_nmi_set_wd_enabled();
308
309 apic_write(APIC_LVTPC, APIC_DM_NMI);
310 evntsel |= K7_EVNTSEL_ENABLE;
311 wrmsr(evntsel_msr, evntsel, 0);
312
305 return 1; 313 return 1;
306} 314}
307 315
@@ -330,7 +338,8 @@ static void single_msr_unreserve(void)
330 release_perfctr_nmi(wd_ops->perfctr); 338 release_perfctr_nmi(wd_ops->perfctr);
331} 339}
332 340
333static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) 341static void __kprobes
342single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
334{ 343{
335 /* start the cycle over again */ 344 /* start the cycle over again */
336 write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); 345 write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
@@ -379,17 +388,23 @@ static int setup_p6_watchdog(unsigned nmi_hz)
379 wrmsr(evntsel_msr, evntsel, 0); 388 wrmsr(evntsel_msr, evntsel, 0);
380 nmi_hz = adjust_for_32bit_ctr(nmi_hz); 389 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
381 write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); 390 write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz);
382 apic_write(APIC_LVTPC, APIC_DM_NMI);
383 evntsel |= P6_EVNTSEL0_ENABLE;
384 wrmsr(evntsel_msr, evntsel, 0);
385 391
392 /* initialize the wd struct before enabling */
386 wd->perfctr_msr = perfctr_msr; 393 wd->perfctr_msr = perfctr_msr;
387 wd->evntsel_msr = evntsel_msr; 394 wd->evntsel_msr = evntsel_msr;
388 wd->cccr_msr = 0; /* unused */ 395 wd->cccr_msr = 0; /* unused */
396
397 /* ok, everything is initialized, announce that we're set */
398 cpu_nmi_set_wd_enabled();
399
400 apic_write(APIC_LVTPC, APIC_DM_NMI);
401 evntsel |= P6_EVNTSEL0_ENABLE;
402 wrmsr(evntsel_msr, evntsel, 0);
403
389 return 1; 404 return 1;
390} 405}
391 406
392static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) 407static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
393{ 408{
394 /* 409 /*
395 * P6 based Pentium M need to re-unmask 410 * P6 based Pentium M need to re-unmask
@@ -432,6 +447,27 @@ static const struct wd_ops p6_wd_ops = {
432#define P4_CCCR_ENABLE (1 << 12) 447#define P4_CCCR_ENABLE (1 << 12)
433#define P4_CCCR_OVF (1 << 31) 448#define P4_CCCR_OVF (1 << 31)
434 449
450#define P4_CONTROLS 18
451static unsigned int p4_controls[18] = {
452 MSR_P4_BPU_CCCR0,
453 MSR_P4_BPU_CCCR1,
454 MSR_P4_BPU_CCCR2,
455 MSR_P4_BPU_CCCR3,
456 MSR_P4_MS_CCCR0,
457 MSR_P4_MS_CCCR1,
458 MSR_P4_MS_CCCR2,
459 MSR_P4_MS_CCCR3,
460 MSR_P4_FLAME_CCCR0,
461 MSR_P4_FLAME_CCCR1,
462 MSR_P4_FLAME_CCCR2,
463 MSR_P4_FLAME_CCCR3,
464 MSR_P4_IQ_CCCR0,
465 MSR_P4_IQ_CCCR1,
466 MSR_P4_IQ_CCCR2,
467 MSR_P4_IQ_CCCR3,
468 MSR_P4_IQ_CCCR4,
469 MSR_P4_IQ_CCCR5,
470};
435/* 471/*
436 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter 472 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
437 * CRU_ESCR0 (with any non-null event selector) through a complemented 473 * CRU_ESCR0 (with any non-null event selector) through a complemented
@@ -473,6 +509,26 @@ static int setup_p4_watchdog(unsigned nmi_hz)
473 evntsel_msr = MSR_P4_CRU_ESCR0; 509 evntsel_msr = MSR_P4_CRU_ESCR0;
474 cccr_msr = MSR_P4_IQ_CCCR0; 510 cccr_msr = MSR_P4_IQ_CCCR0;
475 cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); 511 cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
512
513 /*
 514 * If we're on the kdump kernel, or in some other situation, we may
515 * still have other performance counter registers set to
516 * interrupt and they'll keep interrupting forever because
517 * of the P4_CCCR_OVF quirk. So we need to ACK all the
518 * pending interrupts and disable all the registers here,
519 * before reenabling the NMI delivery. Refer to p4_rearm()
520 * about the P4_CCCR_OVF quirk.
521 */
522 if (reset_devices) {
523 unsigned int low, high;
524 int i;
525
526 for (i = 0; i < P4_CONTROLS; i++) {
527 rdmsr(p4_controls[i], low, high);
528 low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF);
529 wrmsr(p4_controls[i], low, high);
530 }
531 }
476 } else { 532 } else {
477 /* logical cpu 1 */ 533 /* logical cpu 1 */
478 perfctr_msr = MSR_P4_IQ_PERFCTR1; 534 perfctr_msr = MSR_P4_IQ_PERFCTR1;
@@ -499,12 +555,17 @@ static int setup_p4_watchdog(unsigned nmi_hz)
499 wrmsr(evntsel_msr, evntsel, 0); 555 wrmsr(evntsel_msr, evntsel, 0);
500 wrmsr(cccr_msr, cccr_val, 0); 556 wrmsr(cccr_msr, cccr_val, 0);
501 write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); 557 write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
502 apic_write(APIC_LVTPC, APIC_DM_NMI); 558
503 cccr_val |= P4_CCCR_ENABLE;
504 wrmsr(cccr_msr, cccr_val, 0);
505 wd->perfctr_msr = perfctr_msr; 559 wd->perfctr_msr = perfctr_msr;
506 wd->evntsel_msr = evntsel_msr; 560 wd->evntsel_msr = evntsel_msr;
507 wd->cccr_msr = cccr_msr; 561 wd->cccr_msr = cccr_msr;
562
563 /* ok, everything is initialized, announce that we're set */
564 cpu_nmi_set_wd_enabled();
565
566 apic_write(APIC_LVTPC, APIC_DM_NMI);
567 cccr_val |= P4_CCCR_ENABLE;
568 wrmsr(cccr_msr, cccr_val, 0);
508 return 1; 569 return 1;
509} 570}
510 571
@@ -547,7 +608,7 @@ static void p4_unreserve(void)
547 release_perfctr_nmi(MSR_P4_IQ_PERFCTR0); 608 release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
548} 609}
549 610
550static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) 611static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
551{ 612{
552 unsigned dummy; 613 unsigned dummy;
553 /* 614 /*
@@ -620,13 +681,17 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
620 wrmsr(evntsel_msr, evntsel, 0); 681 wrmsr(evntsel_msr, evntsel, 0);
621 nmi_hz = adjust_for_32bit_ctr(nmi_hz); 682 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
622 write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); 683 write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
623 apic_write(APIC_LVTPC, APIC_DM_NMI);
624 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
625 wrmsr(evntsel_msr, evntsel, 0);
626 684
627 wd->perfctr_msr = perfctr_msr; 685 wd->perfctr_msr = perfctr_msr;
628 wd->evntsel_msr = evntsel_msr; 686 wd->evntsel_msr = evntsel_msr;
629 wd->cccr_msr = 0; /* unused */ 687 wd->cccr_msr = 0; /* unused */
688
689 /* ok, everything is initialized, announce that we're set */
690 cpu_nmi_set_wd_enabled();
691
692 apic_write(APIC_LVTPC, APIC_DM_NMI);
693 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
694 wrmsr(evntsel_msr, evntsel, 0);
630 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); 695 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
631 return 1; 696 return 1;
632} 697}
@@ -722,7 +787,7 @@ unsigned lapic_adjust_nmi_hz(unsigned hz)
722 return hz; 787 return hz;
723} 788}
724 789
725int lapic_wd_event(unsigned nmi_hz) 790int __kprobes lapic_wd_event(unsigned nmi_hz)
726{ 791{
727 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); 792 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
728 u64 ctr; 793 u64 ctr;
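
The pattern repeated across the perfctr-watchdog hunks above is a reordering: each setup_*_watchdog() variant now fills in the wd control block and announces readiness via cpu_nmi_set_wd_enabled() before it unmasks APIC_LVTPC and sets the counter's enable bit, so an NMI arriving the instant the counter is armed cannot observe half-initialized state. A user-space toy of the same publish-before-arm discipline (all names are illustrative; nothing here is a kernel API):

#include <stdio.h>

struct wd_ctl {
	unsigned long perfctr_msr;
	unsigned long evntsel_msr;
	int enabled;
};

static struct wd_ctl wd;

/* stands in for the NMI handler, which may run as soon as we arm */
static void fake_nmi_handler(void)
{
	if (wd.enabled)
		printf("handler: rearming counter 0x%lx\n", wd.perfctr_msr);
}

/* arming the counter can deliver an event immediately */
static void arm_counter(void)
{
	fake_nmi_handler();
}

int main(void)
{
	/* 1. initialize the control block */
	wd.perfctr_msr = 0xc1;
	wd.evntsel_msr = 0x186;

	/* 2. announce that the watchdog is fully set up */
	wd.enabled = 1;

	/* 3. only now unmask the interrupt and enable the counter */
	arm_counter();
	return 0;
}
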
diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c
new file mode 100644
index 000000000000..5abbea297e0c
--- /dev/null
+++ b/arch/x86/kernel/cpu/powerflags.c
@@ -0,0 +1,20 @@
1/*
2 * Strings for the various x86 power flags
3 *
4 * This file must not contain any executable code.
5 */
6
7#include <asm/cpufeature.h>
8
9const char *const x86_power_flags[32] = {
10 "ts", /* temperature sensor */
11 "fid", /* frequency id control */
12 "vid", /* voltage id control */
13 "ttp", /* thermal trip */
14 "tm",
15 "stc",
16 "100mhzsteps",
17 "hwpstate",
18 "", /* tsc invariant mapped to constant_tsc */
19 /* nothing */
20};
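
The new x86_power_flags[] table is indexed by bit position of the CPU's power-management feature word, and consumers such as the /proc/cpuinfo code typically print either the named string or, for unnamed bits, a raw bit number. A small stand-alone sketch of that rendering, with the table abridged for illustration:

#include <stdio.h>

/* abridged copy of the table above, for illustration only */
static const char *const x86_power_flags[32] = {
	"ts", "fid", "vid", "ttp", "tm", "stc", "100mhzsteps", "hwpstate",
};

static void print_power_flags(unsigned int power)
{
	for (int i = 0; i < 32; i++) {
		if (!(power & (1u << i)))
			continue;
		if (x86_power_flags[i] && x86_power_flags[i][0])
			printf(" %s", x86_power_flags[i]);
		else
			printf(" [%d]", i);	/* unnamed bit */
	}
	printf("\n");
}

int main(void)
{
	print_power_flags(0x1f);	/* " ts fid vid ttp tm" */
	return 0;
}
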
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index b911a2c61b8f..52b3fefbd5af 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -5,6 +5,18 @@
5#include <asm/msr.h> 5#include <asm/msr.h>
6#include "cpu.h" 6#include "cpu.h"
7 7
8static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
9{
10 u32 xlvl;
11
12 /* Transmeta-defined flags: level 0x80860001 */
13 xlvl = cpuid_eax(0x80860000);
14 if ((xlvl & 0xffff0000) == 0x80860000) {
15 if (xlvl >= 0x80860001)
16 c->x86_capability[2] = cpuid_edx(0x80860001);
17 }
18}
19
8static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) 20static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
9{ 21{
10 unsigned int cap_mask, uk, max, dummy; 22 unsigned int cap_mask, uk, max, dummy;
@@ -12,7 +24,8 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
12 unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev; 24 unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev;
13 char cpu_info[65]; 25 char cpu_info[65];
14 26
15 get_model_name(c); /* Same as AMD/Cyrix */ 27 early_init_transmeta(c);
28
16 display_cacheinfo(c); 29 display_cacheinfo(c);
17 30
18 /* Print CMS and CPU revision */ 31 /* Print CMS and CPU revision */
@@ -85,23 +98,12 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
85#endif 98#endif
86} 99}
87 100
88static void __cpuinit transmeta_identify(struct cpuinfo_x86 *c)
89{
90 u32 xlvl;
91
92 /* Transmeta-defined flags: level 0x80860001 */
93 xlvl = cpuid_eax(0x80860000);
94 if ((xlvl & 0xffff0000) == 0x80860000) {
95 if (xlvl >= 0x80860001)
96 c->x86_capability[2] = cpuid_edx(0x80860001);
97 }
98}
99
100static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { 101static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
101 .c_vendor = "Transmeta", 102 .c_vendor = "Transmeta",
102 .c_ident = { "GenuineTMx86", "TransmetaCPU" }, 103 .c_ident = { "GenuineTMx86", "TransmetaCPU" },
104 .c_early_init = early_init_transmeta,
103 .c_init = init_transmeta, 105 .c_init = init_transmeta,
104 .c_identify = transmeta_identify, 106 .c_x86_vendor = X86_VENDOR_TRANSMETA,
105}; 107};
106 108
107cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev); 109cpu_dev_register(transmeta_cpu_dev);
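
The registration change here, repeated for umc.c just below, moves the vendor id into the descriptor itself as .c_x86_vendor and replaces the two-argument cpu_vendor_dev_register() with cpu_dev_register(). A stand-alone toy model of the new shape (invented names, heavily abridged struct) showing why the separate vendor argument becomes redundant:

#include <stdio.h>

enum { VENDOR_TRANSMETA, VENDOR_UMC, VENDOR_MAX };

struct cpu_dev {
	const char *c_vendor;
	int c_x86_vendor;	/* the vendor id now travels in the struct */
};

static const struct cpu_dev *cpu_devs[VENDOR_MAX];

/* new style: the register call reads the vendor id out of the struct */
static void cpu_dev_register(const struct cpu_dev *dev)
{
	cpu_devs[dev->c_x86_vendor] = dev;
}

static const struct cpu_dev transmeta_dev = {
	.c_vendor = "Transmeta",
	.c_x86_vendor = VENDOR_TRANSMETA,
};

int main(void)
{
	cpu_dev_register(&transmeta_dev);
	printf("slot %d: %s\n", VENDOR_TRANSMETA,
	       cpu_devs[VENDOR_TRANSMETA]->c_vendor);
	return 0;
}
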
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index b1fc90989d75..e777f79e0960 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -19,7 +19,8 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = {
19 } 19 }
20 }, 20 },
21 }, 21 },
22 .c_x86_vendor = X86_VENDOR_UMC,
22}; 23};
23 24
24cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev); 25cpu_dev_register(umc_cpu_dev);
25 26
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 8e9cd6a8ec12..72cefd1e649b 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -36,7 +36,6 @@
36#include <linux/smp_lock.h> 36#include <linux/smp_lock.h>
37#include <linux/major.h> 37#include <linux/major.h>
38#include <linux/fs.h> 38#include <linux/fs.h>
39#include <linux/smp_lock.h>
40#include <linux/device.h> 39#include <linux/device.h>
41#include <linux/cpu.h> 40#include <linux/cpu.h>
42#include <linux/notifier.h> 41#include <linux/notifier.h>
@@ -148,8 +147,8 @@ static __cpuinit int cpuid_device_create(int cpu)
148{ 147{
149 struct device *dev; 148 struct device *dev;
150 149
151 dev = device_create_drvdata(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), 150 dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL,
152 NULL, "cpu%d", cpu); 151 "cpu%d", cpu);
153 return IS_ERR(dev) ? PTR_ERR(dev) : 0; 152 return IS_ERR(dev) ? PTR_ERR(dev) : 0;
154} 153}
155 154
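
Besides dropping the duplicated smp_lock.h include, the cpuid.c hunk migrates from device_create_drvdata() to device_create(), which in this kernel takes the driver-data pointer (NULL here) as the argument right before the name format string. A minimal kernel-context fragment of the updated pattern, assuming a class created elsewhere; a sketch, not a drop-in:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/major.h>

/* hypothetical helper: create a per-cpu "cpu<N>" node for a class that
 * was set up elsewhere with class_create() */
static int example_node_create(struct class *cls, int cpu)
{
	struct device *dev;

	/* the drvdata pointer (4th argument, NULL here) now precedes
	 * the name format string */
	dev = device_create(cls, NULL, MKDEV(CPUID_MAJOR, cpu), NULL,
			    "cpu%d", cpu);
	return IS_ERR(dev) ? PTR_ERR(dev) : 0;
}
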
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 72d0c56c1b48..f7cdb3b457aa 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -13,6 +13,9 @@
13 13
14static void *kdump_buf_page; 14static void *kdump_buf_page;
15 15
16/* Stores the physical address of elf header of crash image. */
17unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
18
16/** 19/**
17 * copy_oldmem_page - copy one page from "oldmem" 20 * copy_oldmem_page - copy one page from "oldmem"
18 * @pfn: page frame number to be copied 21 * @pfn: page frame number to be copied
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 15e6c6bc4a46..045b36cada65 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -7,9 +7,11 @@
7 7
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/crash_dump.h> 9#include <linux/crash_dump.h>
10#include <linux/uaccess.h>
11#include <linux/io.h>
10 12
11#include <asm/uaccess.h> 13/* Stores the physical address of elf header of crash image. */
12#include <asm/io.h> 14unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
13 15
14/** 16/**
15 * copy_oldmem_page - copy one page from "oldmem" 17 * copy_oldmem_page - copy one page from "oldmem"
@@ -25,7 +27,7 @@
25 * in the current kernel. We stitch up a pte, similar to kmap_atomic. 27 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
26 */ 28 */
27ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 29ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
28 size_t csize, unsigned long offset, int userbuf) 30 size_t csize, unsigned long offset, int userbuf)
29{ 31{
30 void *vaddr; 32 void *vaddr;
31 33
@@ -33,14 +35,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
33 return 0; 35 return 0;
34 36
35 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); 37 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
38 if (!vaddr)
39 return -ENOMEM;
36 40
37 if (userbuf) { 41 if (userbuf) {
38 if (copy_to_user(buf, (vaddr + offset), csize)) { 42 if (copy_to_user(buf, vaddr + offset, csize)) {
39 iounmap(vaddr); 43 iounmap(vaddr);
40 return -EFAULT; 44 return -EFAULT;
41 } 45 }
42 } else 46 } else
43 memcpy(buf, (vaddr + offset), csize); 47 memcpy(buf, vaddr + offset, csize);
44 48
45 iounmap(vaddr); 49 iounmap(vaddr);
46 return csize; 50 return csize;
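
With the ioremap() check added above, copy_oldmem_page() now fails with -ENOMEM instead of dereferencing a NULL mapping. Callers split their reads at page boundaries and stop at the first negative return; the loop below is a sketch patterned on the kernel's vmcore read path, with a hypothetical helper name:

#include <linux/crash_dump.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>

/* hypothetical caller: read `count` bytes of old-kernel memory starting
 * at *ppos, one page at a time, stopping at the first error */
static ssize_t example_read_oldmem(char *buf, size_t count, u64 *ppos)
{
	unsigned long pfn, offset;
	size_t nr_bytes, read = 0;
	ssize_t rc;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	while (count) {
		nr_bytes = min(count, (size_t)(PAGE_SIZE - offset));

		/* userbuf == 0: we are copying into a kernel buffer */
		rc = copy_oldmem_page(pfn, buf, nr_bytes, offset, 0);
		if (rc < 0)
			return rc;

		*ppos += nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		count -= nr_bytes;
		offset = 0;
		pfn++;
	}
	return read;
}
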
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
index a47798b59f07..b4f14c6c09d9 100644
--- a/arch/x86/kernel/doublefault_32.c
+++ b/arch/x86/kernel/doublefault_32.c
@@ -66,6 +66,6 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
66 .ds = __USER_DS, 66 .ds = __USER_DS,
67 .fs = __KERNEL_PERCPU, 67 .fs = __KERNEL_PERCPU,
68 68
69 .__cr3 = __pa(swapper_pg_dir) 69 .__cr3 = __pa_nodebug(swapper_pg_dir),
70 } 70 }
71}; 71};
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 11c11b8ec48d..2b69994fd3a8 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -2,26 +2,49 @@
2 * Debug Store support 2 * Debug Store support
3 * 3 *
4 * This provides a low-level interface to the hardware's Debug Store 4 * This provides a low-level interface to the hardware's Debug Store
5 * feature that is used for last branch recording (LBR) and 5 * feature that is used for branch trace store (BTS) and
6 * precise-event based sampling (PEBS). 6 * precise-event based sampling (PEBS).
7 * 7 *
8 * Different architectures use a different DS layout/pointer size. 8 * It manages:
9 * The below functions therefore work on a void*. 9 * - per-thread and per-cpu allocation of BTS and PEBS
10 * - buffer memory allocation (optional)
11 * - buffer overflow handling
12 * - buffer access
10 * 13 *
14 * It assumes:
15 * - get_task_struct on all parameter tasks
16 * - current is allowed to trace parameter tasks
11 * 17 *
12 * Since there is no user for PEBS, yet, only LBR (or branch
13 * trace store, BTS) is supported.
14 * 18 *
15 * 19 * Copyright (C) 2007-2008 Intel Corporation.
16 * Copyright (C) 2007 Intel Corporation. 20 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
17 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
18 */ 21 */
19 22
23
24#ifdef CONFIG_X86_DS
25
20#include <asm/ds.h> 26#include <asm/ds.h>
21 27
22#include <linux/errno.h> 28#include <linux/errno.h>
23#include <linux/string.h> 29#include <linux/string.h>
24#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/sched.h>
32#include <linux/mm.h>
33
34
35/*
36 * The configuration for a particular DS hardware implementation.
37 */
38struct ds_configuration {
39 /* the size of the DS structure in bytes */
40 unsigned char sizeof_ds;
41 /* the size of one pointer-typed field in the DS structure in bytes;
42 this covers the first 8 fields related to buffer management. */
43 unsigned char sizeof_field;
44 /* the size of a BTS/PEBS record in bytes */
45 unsigned char sizeof_rec[2];
46};
47static struct ds_configuration ds_cfg;
25 48
26 49
27/* 50/*
@@ -44,378 +67,747 @@
44 * (interrupt occurs when write pointer passes interrupt pointer) 67 * (interrupt occurs when write pointer passes interrupt pointer)
45 * - value to which counter is reset following counter overflow 68 * - value to which counter is reset following counter overflow
46 * 69 *
47 * On later architectures, the last branch recording hardware uses 70 * Later architectures use 64bit pointers throughout, whereas earlier
48 * 64bit pointers even in 32bit mode. 71 * architectures use 32bit pointers in 32bit mode.
49 *
50 *
51 * Branch Trace Store (BTS) records store information about control
52 * flow changes. They at least provide the following information:
53 * - source linear address
54 * - destination linear address
55 * 72 *
56 * Netburst supported a predicated bit that had been dropped in later
57 * architectures. We do not suppor it.
58 * 73 *
74 * We compute the base address for the first 8 fields based on:
75 * - the field size stored in the DS configuration
76 * - the relative field position
77 * - an offset giving the start of the respective region
59 * 78 *
60 * In order to abstract from the actual DS and BTS layout, we describe 79 * This offset is further used to index various arrays holding
61 * the access to the relevant fields. 80 * information for BTS and PEBS at the respective index.
62 * Thanks to Andi Kleen for proposing this design.
63 * 81 *
64 * The implementation, however, is not as general as it might seem. In 82 * On later 32bit processors, we only access the lower 32bit of the
65 * order to stay somewhat simple and efficient, we assume an 83 * 64bit pointer fields. The upper halves will be zeroed out.
66 * underlying unsigned type (mostly a pointer type) and we expect the
67 * field to be at least as big as that type.
68 */ 84 */
69 85
70/* 86enum ds_field {
71 * A special from_ip address to indicate that the BTS record is an 87 ds_buffer_base = 0,
72 * info record that needs to be interpreted or skipped. 88 ds_index,
73 */ 89 ds_absolute_maximum,
74#define BTS_ESCAPE_ADDRESS (-1) 90 ds_interrupt_threshold,
91};
75 92
76/* 93enum ds_qualifier {
77 * A field access descriptor 94 ds_bts = 0,
78 */ 95 ds_pebs
79struct access_desc {
80 unsigned char offset;
81 unsigned char size;
82}; 96};
83 97
98static inline unsigned long ds_get(const unsigned char *base,
99 enum ds_qualifier qual, enum ds_field field)
100{
101 base += (ds_cfg.sizeof_field * (field + (4 * qual)));
102 return *(unsigned long *)base;
103}
104
105static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
106 enum ds_field field, unsigned long value)
107{
108 base += (ds_cfg.sizeof_field * (field + (4 * qual)));
109 (*(unsigned long *)base) = value;
110}
111
112
84/* 113/*
85 * The configuration for a particular DS/BTS hardware implementation. 114 * Locking is done only for allocating BTS or PEBS resources and for
115 * guarding context and buffer memory allocation.
116 *
117 * Most functions require the current task to own the ds context part
118 * they are going to access. All the locking is done when validating
119 * access to the context.
86 */ 120 */
87struct ds_configuration { 121static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock);
88 /* the DS configuration */
89 unsigned char sizeof_ds;
90 struct access_desc bts_buffer_base;
91 struct access_desc bts_index;
92 struct access_desc bts_absolute_maximum;
93 struct access_desc bts_interrupt_threshold;
94 /* the BTS configuration */
95 unsigned char sizeof_bts;
96 struct access_desc from_ip;
97 struct access_desc to_ip;
98 /* BTS variants used to store additional information like
99 timestamps */
100 struct access_desc info_type;
101 struct access_desc info_data;
102 unsigned long debugctl_mask;
103};
104 122
105/* 123/*
106 * The global configuration used by the below accessor functions 124 * Validate that the current task is allowed to access the BTS/PEBS
125 * buffer of the parameter task.
126 *
127 * Returns 0, if access is granted; -Eerrno, otherwise.
107 */ 128 */
108static struct ds_configuration ds_cfg; 129static inline int ds_validate_access(struct ds_context *context,
130 enum ds_qualifier qual)
131{
132 if (!context)
133 return -EPERM;
134
135 if (context->owner[qual] == current)
136 return 0;
137
138 return -EPERM;
139}
140
109 141
110/* 142/*
111 * Accessor functions for some DS and BTS fields using the above 143 * We either support (system-wide) per-cpu or per-thread allocation.
112 * global ptrace_bts_cfg. 144 * We distinguish the two based on the task_struct pointer, where a
145 * NULL pointer indicates per-cpu allocation for the current cpu.
146 *
147 * Allocations are use-counted. As soon as resources are allocated,
148 * further allocations must be of the same type (per-cpu or
149 * per-thread). We model this by counting allocations (i.e. the number
150 * of tracers of a certain type) for one type negatively:
151 * =0 no tracers
152 * >0 number of per-thread tracers
153 * <0 number of per-cpu tracers
154 *
155 * The below functions to get and put tracers and to check the
156 * allocation type require the ds_lock to be held by the caller.
157 *
 158 * The tracers count essentially gives the number of ds contexts for a
 159 * certain type of allocation.
113 */ 160 */
114static inline unsigned long get_bts_buffer_base(char *base) 161static long tracers;
162
163static inline void get_tracer(struct task_struct *task)
115{ 164{
116 return *(unsigned long *)(base + ds_cfg.bts_buffer_base.offset); 165 tracers += (task ? 1 : -1);
117} 166}
118static inline void set_bts_buffer_base(char *base, unsigned long value) 167
168static inline void put_tracer(struct task_struct *task)
119{ 169{
120 (*(unsigned long *)(base + ds_cfg.bts_buffer_base.offset)) = value; 170 tracers -= (task ? 1 : -1);
121} 171}
122static inline unsigned long get_bts_index(char *base) 172
173static inline int check_tracer(struct task_struct *task)
123{ 174{
124 return *(unsigned long *)(base + ds_cfg.bts_index.offset); 175 return (task ? (tracers >= 0) : (tracers <= 0));
125} 176}
126static inline void set_bts_index(char *base, unsigned long value) 177
178
179/*
180 * The DS context is either attached to a thread or to a cpu:
181 * - in the former case, the thread_struct contains a pointer to the
182 * attached context.
183 * - in the latter case, we use a static array of per-cpu context
184 * pointers.
185 *
186 * Contexts are use-counted. They are allocated on first access and
187 * deallocated when the last user puts the context.
188 *
189 * We distinguish between an allocating and a non-allocating get of a
190 * context:
191 * - the allocating get is used for requesting BTS/PEBS resources. It
192 * requires the caller to hold the global ds_lock.
193 * - the non-allocating get is used for all other cases. A
194 * non-existing context indicates an error. It acquires and releases
195 * the ds_lock itself for obtaining the context.
196 *
197 * A context and its DS configuration are allocated and deallocated
198 * together. A context always has a DS configuration of the
199 * appropriate size.
200 */
201static DEFINE_PER_CPU(struct ds_context *, system_context);
202
203#define this_system_context per_cpu(system_context, smp_processor_id())
204
205/*
206 * Returns the pointer to the parameter task's context or to the
207 * system-wide context, if task is NULL.
208 *
209 * Increases the use count of the returned context, if not NULL.
210 */
211static inline struct ds_context *ds_get_context(struct task_struct *task)
127{ 212{
128 (*(unsigned long *)(base + ds_cfg.bts_index.offset)) = value; 213 struct ds_context *context;
214
215 spin_lock(&ds_lock);
216
217 context = (task ? task->thread.ds_ctx : this_system_context);
218 if (context)
219 context->count++;
220
221 spin_unlock(&ds_lock);
222
223 return context;
129} 224}
130static inline unsigned long get_bts_absolute_maximum(char *base) 225
226/*
 227 * Same as ds_get_context, but allocates the context and its DS
 228 * structure, if necessary; returns NULL, if out of memory.
229 *
230 * pre: requires ds_lock to be held
231 */
232static inline struct ds_context *ds_alloc_context(struct task_struct *task)
131{ 233{
132 return *(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset); 234 struct ds_context **p_context =
235 (task ? &task->thread.ds_ctx : &this_system_context);
236 struct ds_context *context = *p_context;
237
238 if (!context) {
239 context = kzalloc(sizeof(*context), GFP_KERNEL);
240
241 if (!context)
242 return NULL;
243
244 context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
245 if (!context->ds) {
246 kfree(context);
247 return NULL;
248 }
249
250 *p_context = context;
251
252 context->this = p_context;
253 context->task = task;
254
255 if (task)
256 set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
257
258 if (!task || (task == current))
259 wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
260
261 get_tracer(task);
262 }
263
264 context->count++;
265
266 return context;
133} 267}
134static inline void set_bts_absolute_maximum(char *base, unsigned long value) 268
269/*
270 * Decreases the use count of the parameter context, if not NULL.
271 * Deallocates the context, if the use count reaches zero.
272 */
273static inline void ds_put_context(struct ds_context *context)
135{ 274{
136 (*(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset)) = value; 275 if (!context)
276 return;
277
278 spin_lock(&ds_lock);
279
280 if (--context->count)
281 goto out;
282
283 *(context->this) = NULL;
284
285 if (context->task)
286 clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);
287
288 if (!context->task || (context->task == current))
289 wrmsrl(MSR_IA32_DS_AREA, 0);
290
291 put_tracer(context->task);
292
293 /* free any leftover buffers from tracers that did not
294 * deallocate them properly. */
295 kfree(context->buffer[ds_bts]);
296 kfree(context->buffer[ds_pebs]);
297 kfree(context->ds);
298 kfree(context);
299 out:
300 spin_unlock(&ds_lock);
137} 301}
138static inline unsigned long get_bts_interrupt_threshold(char *base) 302
303
304/*
305 * Handle a buffer overflow
306 *
307 * task: the task whose buffers are overflowing;
308 * NULL for a buffer overflow on the current cpu
309 * context: the ds context
310 * qual: the buffer type
311 */
312static void ds_overflow(struct task_struct *task, struct ds_context *context,
313 enum ds_qualifier qual)
139{ 314{
140 return *(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset); 315 if (!context)
316 return;
317
318 if (context->callback[qual])
319 (*context->callback[qual])(task);
320
321 /* todo: do some more overflow handling */
141} 322}
142static inline void set_bts_interrupt_threshold(char *base, unsigned long value) 323
324
325/*
326 * Allocate a non-pageable buffer of the parameter size.
327 * Checks the memory and the locked memory rlimit.
328 *
329 * Returns the buffer, if successful;
330 * NULL, if out of memory or rlimit exceeded.
331 *
332 * size: the requested buffer size in bytes
333 * pages (out): if not NULL, contains the number of pages reserved
334 */
335static inline void *ds_allocate_buffer(size_t size, unsigned int *pages)
143{ 336{
144 (*(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset)) = value; 337 unsigned long rlim, vm, pgsz;
338 void *buffer;
339
340 pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
341
342 rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
343 vm = current->mm->total_vm + pgsz;
344 if (rlim < vm)
345 return NULL;
346
347 rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
348 vm = current->mm->locked_vm + pgsz;
349 if (rlim < vm)
350 return NULL;
351
352 buffer = kzalloc(size, GFP_KERNEL);
353 if (!buffer)
354 return NULL;
355
356 current->mm->total_vm += pgsz;
357 current->mm->locked_vm += pgsz;
358
359 if (pages)
360 *pages = pgsz;
361
362 return buffer;
145} 363}
146static inline unsigned long get_from_ip(char *base) 364
365static int ds_request(struct task_struct *task, void *base, size_t size,
366 ds_ovfl_callback_t ovfl, enum ds_qualifier qual)
147{ 367{
148 return *(unsigned long *)(base + ds_cfg.from_ip.offset); 368 struct ds_context *context;
369 unsigned long buffer, adj;
370 const unsigned long alignment = (1 << 3);
371 int error = 0;
372
373 if (!ds_cfg.sizeof_ds)
374 return -EOPNOTSUPP;
375
376 /* we require some space to do alignment adjustments below */
377 if (size < (alignment + ds_cfg.sizeof_rec[qual]))
378 return -EINVAL;
379
380 /* buffer overflow notification is not yet implemented */
381 if (ovfl)
382 return -EOPNOTSUPP;
383
384
385 spin_lock(&ds_lock);
386
387 if (!check_tracer(task))
388 return -EPERM;
389
390 error = -ENOMEM;
391 context = ds_alloc_context(task);
392 if (!context)
393 goto out_unlock;
394
395 error = -EALREADY;
396 if (context->owner[qual] == current)
397 goto out_unlock;
398 error = -EPERM;
399 if (context->owner[qual] != NULL)
400 goto out_unlock;
401 context->owner[qual] = current;
402
403 spin_unlock(&ds_lock);
404
405
406 error = -ENOMEM;
407 if (!base) {
408 base = ds_allocate_buffer(size, &context->pages[qual]);
409 if (!base)
410 goto out_release;
411
412 context->buffer[qual] = base;
413 }
414 error = 0;
415
416 context->callback[qual] = ovfl;
417
418 /* adjust the buffer address and size to meet alignment
419 * constraints:
420 * - buffer is double-word aligned
421 * - size is multiple of record size
422 *
423 * We checked the size at the very beginning; we have enough
424 * space to do the adjustment.
425 */
426 buffer = (unsigned long)base;
427
428 adj = ALIGN(buffer, alignment) - buffer;
429 buffer += adj;
430 size -= adj;
431
432 size /= ds_cfg.sizeof_rec[qual];
433 size *= ds_cfg.sizeof_rec[qual];
434
435 ds_set(context->ds, qual, ds_buffer_base, buffer);
436 ds_set(context->ds, qual, ds_index, buffer);
437 ds_set(context->ds, qual, ds_absolute_maximum, buffer + size);
438
439 if (ovfl) {
440 /* todo: select a suitable interrupt threshold */
441 } else
442 ds_set(context->ds, qual,
443 ds_interrupt_threshold, buffer + size + 1);
444
445 /* we keep the context until ds_release */
446 return error;
447
448 out_release:
449 context->owner[qual] = NULL;
450 ds_put_context(context);
451 return error;
452
453 out_unlock:
454 spin_unlock(&ds_lock);
455 ds_put_context(context);
456 return error;
149} 457}
150static inline void set_from_ip(char *base, unsigned long value) 458
459int ds_request_bts(struct task_struct *task, void *base, size_t size,
460 ds_ovfl_callback_t ovfl)
151{ 461{
152 (*(unsigned long *)(base + ds_cfg.from_ip.offset)) = value; 462 return ds_request(task, base, size, ovfl, ds_bts);
153} 463}
154static inline unsigned long get_to_ip(char *base) 464
465int ds_request_pebs(struct task_struct *task, void *base, size_t size,
466 ds_ovfl_callback_t ovfl)
155{ 467{
156 return *(unsigned long *)(base + ds_cfg.to_ip.offset); 468 return ds_request(task, base, size, ovfl, ds_pebs);
157} 469}
158static inline void set_to_ip(char *base, unsigned long value) 470
471static int ds_release(struct task_struct *task, enum ds_qualifier qual)
159{ 472{
160 (*(unsigned long *)(base + ds_cfg.to_ip.offset)) = value; 473 struct ds_context *context;
474 int error;
475
476 context = ds_get_context(task);
477 error = ds_validate_access(context, qual);
478 if (error < 0)
479 goto out;
480
481 kfree(context->buffer[qual]);
482 context->buffer[qual] = NULL;
483
484 current->mm->total_vm -= context->pages[qual];
485 current->mm->locked_vm -= context->pages[qual];
486 context->pages[qual] = 0;
487 context->owner[qual] = NULL;
488
489 /*
490 * we put the context twice:
491 * once for the ds_get_context
492 * once for the corresponding ds_request
493 */
494 ds_put_context(context);
495 out:
496 ds_put_context(context);
497 return error;
161} 498}
162static inline unsigned char get_info_type(char *base) 499
500int ds_release_bts(struct task_struct *task)
163{ 501{
164 return *(unsigned char *)(base + ds_cfg.info_type.offset); 502 return ds_release(task, ds_bts);
165} 503}
166static inline void set_info_type(char *base, unsigned char value) 504
505int ds_release_pebs(struct task_struct *task)
167{ 506{
168 (*(unsigned char *)(base + ds_cfg.info_type.offset)) = value; 507 return ds_release(task, ds_pebs);
169} 508}
170static inline unsigned long get_info_data(char *base) 509
510static int ds_get_index(struct task_struct *task, size_t *pos,
511 enum ds_qualifier qual)
171{ 512{
172 return *(unsigned long *)(base + ds_cfg.info_data.offset); 513 struct ds_context *context;
514 unsigned long base, index;
515 int error;
516
517 context = ds_get_context(task);
518 error = ds_validate_access(context, qual);
519 if (error < 0)
520 goto out;
521
522 base = ds_get(context->ds, qual, ds_buffer_base);
523 index = ds_get(context->ds, qual, ds_index);
524
525 error = ((index - base) / ds_cfg.sizeof_rec[qual]);
526 if (pos)
527 *pos = error;
528 out:
529 ds_put_context(context);
530 return error;
173} 531}
174static inline void set_info_data(char *base, unsigned long value) 532
533int ds_get_bts_index(struct task_struct *task, size_t *pos)
175{ 534{
176 (*(unsigned long *)(base + ds_cfg.info_data.offset)) = value; 535 return ds_get_index(task, pos, ds_bts);
177} 536}
178 537
538int ds_get_pebs_index(struct task_struct *task, size_t *pos)
539{
540 return ds_get_index(task, pos, ds_pebs);
541}
179 542
180int ds_allocate(void **dsp, size_t bts_size_in_bytes) 543static int ds_get_end(struct task_struct *task, size_t *pos,
544 enum ds_qualifier qual)
181{ 545{
182 size_t bts_size_in_records; 546 struct ds_context *context;
183 unsigned long bts; 547 unsigned long base, end;
184 void *ds; 548 int error;
549
550 context = ds_get_context(task);
551 error = ds_validate_access(context, qual);
552 if (error < 0)
553 goto out;
554
555 base = ds_get(context->ds, qual, ds_buffer_base);
556 end = ds_get(context->ds, qual, ds_absolute_maximum);
557
558 error = ((end - base) / ds_cfg.sizeof_rec[qual]);
559 if (pos)
560 *pos = error;
561 out:
562 ds_put_context(context);
563 return error;
564}
185 565
186 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) 566int ds_get_bts_end(struct task_struct *task, size_t *pos)
187 return -EOPNOTSUPP; 567{
568 return ds_get_end(task, pos, ds_bts);
569}
188 570
189 if (bts_size_in_bytes < 0) 571int ds_get_pebs_end(struct task_struct *task, size_t *pos)
190 return -EINVAL; 572{
573 return ds_get_end(task, pos, ds_pebs);
574}
191 575
192 bts_size_in_records = 576static int ds_access(struct task_struct *task, size_t index,
193 bts_size_in_bytes / ds_cfg.sizeof_bts; 577 const void **record, enum ds_qualifier qual)
194 bts_size_in_bytes = 578{
195 bts_size_in_records * ds_cfg.sizeof_bts; 579 struct ds_context *context;
580 unsigned long base, idx;
581 int error;
196 582
197 if (bts_size_in_bytes <= 0) 583 if (!record)
198 return -EINVAL; 584 return -EINVAL;
199 585
200 bts = (unsigned long)kzalloc(bts_size_in_bytes, GFP_KERNEL); 586 context = ds_get_context(task);
201 587 error = ds_validate_access(context, qual);
202 if (!bts) 588 if (error < 0)
203 return -ENOMEM; 589 goto out;
204 590
205 ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL); 591 base = ds_get(context->ds, qual, ds_buffer_base);
592 idx = base + (index * ds_cfg.sizeof_rec[qual]);
206 593
207 if (!ds) { 594 error = -EINVAL;
208 kfree((void *)bts); 595 if (idx > ds_get(context->ds, qual, ds_absolute_maximum))
209 return -ENOMEM; 596 goto out;
210 }
211
212 set_bts_buffer_base(ds, bts);
213 set_bts_index(ds, bts);
214 set_bts_absolute_maximum(ds, bts + bts_size_in_bytes);
215 set_bts_interrupt_threshold(ds, bts + bts_size_in_bytes + 1);
216 597
217 *dsp = ds; 598 *record = (const void *)idx;
218 return 0; 599 error = ds_cfg.sizeof_rec[qual];
600 out:
601 ds_put_context(context);
602 return error;
219} 603}
220 604
221int ds_free(void **dsp) 605int ds_access_bts(struct task_struct *task, size_t index, const void **record)
222{ 606{
223 if (*dsp) { 607 return ds_access(task, index, record, ds_bts);
224 kfree((void *)get_bts_buffer_base(*dsp));
225 kfree(*dsp);
226 *dsp = NULL;
227 }
228 return 0;
229} 608}
230 609
231int ds_get_bts_size(void *ds) 610int ds_access_pebs(struct task_struct *task, size_t index, const void **record)
232{ 611{
233 int size_in_bytes; 612 return ds_access(task, index, record, ds_pebs);
234
235 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
236 return -EOPNOTSUPP;
237
238 if (!ds)
239 return 0;
240
241 size_in_bytes =
242 get_bts_absolute_maximum(ds) -
243 get_bts_buffer_base(ds);
244 return size_in_bytes;
245} 613}
246 614
247int ds_get_bts_end(void *ds) 615static int ds_write(struct task_struct *task, const void *record, size_t size,
616 enum ds_qualifier qual, int force)
248{ 617{
249 int size_in_bytes = ds_get_bts_size(ds); 618 struct ds_context *context;
250 619 int error;
251 if (size_in_bytes <= 0)
252 return size_in_bytes;
253 620
254 return size_in_bytes / ds_cfg.sizeof_bts; 621 if (!record)
255} 622 return -EINVAL;
256 623
257int ds_get_bts_index(void *ds) 624 error = -EPERM;
258{ 625 context = ds_get_context(task);
259 int index_offset_in_bytes; 626 if (!context)
627 goto out;
260 628
261 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) 629 if (!force) {
262 return -EOPNOTSUPP; 630 error = ds_validate_access(context, qual);
631 if (error < 0)
632 goto out;
633 }
263 634
264 index_offset_in_bytes = 635 error = 0;
265 get_bts_index(ds) - 636 while (size) {
266 get_bts_buffer_base(ds); 637 unsigned long base, index, end, write_end, int_th;
638 unsigned long write_size, adj_write_size;
639
640 /*
641 * write as much as possible without producing an
642 * overflow interrupt.
643 *
644 * interrupt_threshold must either be
645 * - bigger than absolute_maximum or
646 * - point to a record between buffer_base and absolute_maximum
647 *
648 * index points to a valid record.
649 */
650 base = ds_get(context->ds, qual, ds_buffer_base);
651 index = ds_get(context->ds, qual, ds_index);
652 end = ds_get(context->ds, qual, ds_absolute_maximum);
653 int_th = ds_get(context->ds, qual, ds_interrupt_threshold);
654
655 write_end = min(end, int_th);
656
657 /* if we are already beyond the interrupt threshold,
658 * we fill the entire buffer */
659 if (write_end <= index)
660 write_end = end;
661
662 if (write_end <= index)
663 goto out;
664
665 write_size = min((unsigned long) size, write_end - index);
666 memcpy((void *)index, record, write_size);
667
668 record = (const char *)record + write_size;
669 size -= write_size;
670 error += write_size;
671
672 adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
673 adj_write_size *= ds_cfg.sizeof_rec[qual];
674
675 /* zero out trailing bytes */
676 memset((char *)index + write_size, 0,
677 adj_write_size - write_size);
678 index += adj_write_size;
679
680 if (index >= end)
681 index = base;
682 ds_set(context->ds, qual, ds_index, index);
683
684 if (index >= int_th)
685 ds_overflow(task, context, qual);
686 }
267 687
268 return index_offset_in_bytes / ds_cfg.sizeof_bts; 688 out:
689 ds_put_context(context);
690 return error;
269} 691}
270 692
271int ds_set_overflow(void *ds, int method) 693int ds_write_bts(struct task_struct *task, const void *record, size_t size)
272{ 694{
273 switch (method) { 695 return ds_write(task, record, size, ds_bts, /* force = */ 0);
274 case DS_O_SIGNAL:
275 return -EOPNOTSUPP;
276 case DS_O_WRAP:
277 return 0;
278 default:
279 return -EINVAL;
280 }
281} 696}
282 697
283int ds_get_overflow(void *ds) 698int ds_write_pebs(struct task_struct *task, const void *record, size_t size)
284{ 699{
285 return DS_O_WRAP; 700 return ds_write(task, record, size, ds_pebs, /* force = */ 0);
286} 701}
287 702
288int ds_clear(void *ds) 703int ds_unchecked_write_bts(struct task_struct *task,
704 const void *record, size_t size)
289{ 705{
290 int bts_size = ds_get_bts_size(ds); 706 return ds_write(task, record, size, ds_bts, /* force = */ 1);
291 unsigned long bts_base;
292
293 if (bts_size <= 0)
294 return bts_size;
295
296 bts_base = get_bts_buffer_base(ds);
297 memset((void *)bts_base, 0, bts_size);
298
299 set_bts_index(ds, bts_base);
300 return 0;
301} 707}
302 708
303int ds_read_bts(void *ds, int index, struct bts_struct *out) 709int ds_unchecked_write_pebs(struct task_struct *task,
710 const void *record, size_t size)
304{ 711{
305 void *bts; 712 return ds_write(task, record, size, ds_pebs, /* force = */ 1);
713}
306 714
307 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) 715static int ds_reset_or_clear(struct task_struct *task,
308 return -EOPNOTSUPP; 716 enum ds_qualifier qual, int clear)
717{
718 struct ds_context *context;
719 unsigned long base, end;
720 int error;
309 721
310 if (index < 0) 722 context = ds_get_context(task);
311 return -EINVAL; 723 error = ds_validate_access(context, qual);
724 if (error < 0)
725 goto out;
312 726
313 if (index >= ds_get_bts_size(ds)) 727 base = ds_get(context->ds, qual, ds_buffer_base);
314 return -EINVAL; 728 end = ds_get(context->ds, qual, ds_absolute_maximum);
315 729
316 bts = (void *)(get_bts_buffer_base(ds) + (index * ds_cfg.sizeof_bts)); 730 if (clear)
731 memset((void *)base, 0, end - base);
317 732
318 memset(out, 0, sizeof(*out)); 733 ds_set(context->ds, qual, ds_index, base);
319 if (get_from_ip(bts) == BTS_ESCAPE_ADDRESS) {
320 out->qualifier = get_info_type(bts);
321 out->variant.jiffies = get_info_data(bts);
322 } else {
323 out->qualifier = BTS_BRANCH;
324 out->variant.lbr.from_ip = get_from_ip(bts);
325 out->variant.lbr.to_ip = get_to_ip(bts);
326 }
327 734
328 return sizeof(*out);; 735 error = 0;
736 out:
737 ds_put_context(context);
738 return error;
329} 739}
330 740
331int ds_write_bts(void *ds, const struct bts_struct *in) 741int ds_reset_bts(struct task_struct *task)
332{ 742{
333 unsigned long bts; 743 return ds_reset_or_clear(task, ds_bts, /* clear = */ 0);
334 744}
335 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
336 return -EOPNOTSUPP;
337
338 if (ds_get_bts_size(ds) <= 0)
339 return -ENXIO;
340 745
341 bts = get_bts_index(ds); 746int ds_reset_pebs(struct task_struct *task)
747{
748 return ds_reset_or_clear(task, ds_pebs, /* clear = */ 0);
749}
342 750
343 memset((void *)bts, 0, ds_cfg.sizeof_bts); 751int ds_clear_bts(struct task_struct *task)
344 switch (in->qualifier) { 752{
345 case BTS_INVALID: 753 return ds_reset_or_clear(task, ds_bts, /* clear = */ 1);
346 break; 754}
347 755
348 case BTS_BRANCH: 756int ds_clear_pebs(struct task_struct *task)
349 set_from_ip((void *)bts, in->variant.lbr.from_ip); 757{
350 set_to_ip((void *)bts, in->variant.lbr.to_ip); 758 return ds_reset_or_clear(task, ds_pebs, /* clear = */ 1);
351 break; 759}
352 760
353 case BTS_TASK_ARRIVES: 761int ds_get_pebs_reset(struct task_struct *task, u64 *value)
354 case BTS_TASK_DEPARTS: 762{
355 set_from_ip((void *)bts, BTS_ESCAPE_ADDRESS); 763 struct ds_context *context;
356 set_info_type((void *)bts, in->qualifier); 764 int error;
357 set_info_data((void *)bts, in->variant.jiffies);
358 break;
359 765
360 default: 766 if (!value)
361 return -EINVAL; 767 return -EINVAL;
362 }
363 768
364 bts = bts + ds_cfg.sizeof_bts; 769 context = ds_get_context(task);
365 if (bts >= get_bts_absolute_maximum(ds)) 770 error = ds_validate_access(context, ds_pebs);
366 bts = get_bts_buffer_base(ds); 771 if (error < 0)
367 set_bts_index(ds, bts); 772 goto out;
368 773
369 return ds_cfg.sizeof_bts; 774 *value = *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8));
775
776 error = 0;
777 out:
778 ds_put_context(context);
779 return error;
370} 780}
371 781
372unsigned long ds_debugctl_mask(void) 782int ds_set_pebs_reset(struct task_struct *task, u64 value)
373{ 783{
374 return ds_cfg.debugctl_mask; 784 struct ds_context *context;
375} 785 int error;
376 786
377#ifdef __i386__ 787 context = ds_get_context(task);
378static const struct ds_configuration ds_cfg_netburst = { 788 error = ds_validate_access(context, ds_pebs);
379 .sizeof_ds = 9 * 4, 789 if (error < 0)
380 .bts_buffer_base = { 0, 4 }, 790 goto out;
381 .bts_index = { 4, 4 },
382 .bts_absolute_maximum = { 8, 4 },
383 .bts_interrupt_threshold = { 12, 4 },
384 .sizeof_bts = 3 * 4,
385 .from_ip = { 0, 4 },
386 .to_ip = { 4, 4 },
387 .info_type = { 4, 1 },
388 .info_data = { 8, 4 },
389 .debugctl_mask = (1<<2)|(1<<3)
390};
391 791
392static const struct ds_configuration ds_cfg_pentium_m = { 792 *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)) = value;
393 .sizeof_ds = 9 * 4, 793
394 .bts_buffer_base = { 0, 4 }, 794 error = 0;
395 .bts_index = { 4, 4 }, 795 out:
396 .bts_absolute_maximum = { 8, 4 }, 796 ds_put_context(context);
397 .bts_interrupt_threshold = { 12, 4 }, 797 return error;
398 .sizeof_bts = 3 * 4, 798}
399 .from_ip = { 0, 4 }, 799
400 .to_ip = { 4, 4 }, 800static const struct ds_configuration ds_cfg_var = {
401 .info_type = { 4, 1 }, 801 .sizeof_ds = sizeof(long) * 12,
402 .info_data = { 8, 4 }, 802 .sizeof_field = sizeof(long),
403 .debugctl_mask = (1<<6)|(1<<7) 803 .sizeof_rec[ds_bts] = sizeof(long) * 3,
804 .sizeof_rec[ds_pebs] = sizeof(long) * 10
404}; 805};
405#endif /* _i386_ */ 806static const struct ds_configuration ds_cfg_64 = {
406 807 .sizeof_ds = 8 * 12,
407static const struct ds_configuration ds_cfg_core2 = { 808 .sizeof_field = 8,
408 .sizeof_ds = 9 * 8, 809 .sizeof_rec[ds_bts] = 8 * 3,
409 .bts_buffer_base = { 0, 8 }, 810 .sizeof_rec[ds_pebs] = 8 * 10
410 .bts_index = { 8, 8 },
411 .bts_absolute_maximum = { 16, 8 },
412 .bts_interrupt_threshold = { 24, 8 },
413 .sizeof_bts = 3 * 8,
414 .from_ip = { 0, 8 },
415 .to_ip = { 8, 8 },
416 .info_type = { 8, 1 },
417 .info_data = { 16, 8 },
418 .debugctl_mask = (1<<6)|(1<<7)|(1<<9)
419}; 811};
420 812
421static inline void 813static inline void
@@ -429,14 +821,13 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
429 switch (c->x86) { 821 switch (c->x86) {
430 case 0x6: 822 case 0x6:
431 switch (c->x86_model) { 823 switch (c->x86_model) {
432#ifdef __i386__
433 case 0xD: 824 case 0xD:
434 case 0xE: /* Pentium M */ 825 case 0xE: /* Pentium M */
435 ds_configure(&ds_cfg_pentium_m); 826 ds_configure(&ds_cfg_var);
436 break; 827 break;
437#endif /* _i386_ */
438 case 0xF: /* Core2 */ 828 case 0xF: /* Core2 */
439 ds_configure(&ds_cfg_core2); 829 case 0x1C: /* Atom */
830 ds_configure(&ds_cfg_64);
440 break; 831 break;
441 default: 832 default:
442 /* sorry, don't know about them */ 833 /* sorry, don't know about them */
@@ -445,13 +836,11 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
445 break; 836 break;
446 case 0xF: 837 case 0xF:
447 switch (c->x86_model) { 838 switch (c->x86_model) {
448#ifdef __i386__
449 case 0x0: 839 case 0x0:
450 case 0x1: 840 case 0x1:
451 case 0x2: /* Netburst */ 841 case 0x2: /* Netburst */
452 ds_configure(&ds_cfg_netburst); 842 ds_configure(&ds_cfg_var);
453 break; 843 break;
454#endif /* _i386_ */
455 default: 844 default:
456 /* sorry, don't know about them */ 845 /* sorry, don't know about them */
457 break; 846 break;
@@ -462,3 +851,14 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
462 break; 851 break;
463 } 852 }
464} 853}
854
855void ds_free(struct ds_context *context)
856{
857 /* This is called when the task owning the parameter context
858 * is dying. There should not be any user of that context left
 859 * to disturb us anymore. */
860 unsigned long leftovers = context->count;
861 while (leftovers--)
862 ds_put_context(context);
863}
864#endif /* CONFIG_X86_DS */
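
The core trick of the rewritten ds.c shows up in ds_get()/ds_set() near the top of the file: the DS save area is treated as an array of equally sized fields, with the four BTS management words in slots 0-3 and the PEBS ones in slots 4-7, so the single computation sizeof_field * (field + 4 * qual) replaces the old per-field accessor pairs. A stand-alone model of just that arithmetic (memcpy stands in for the kernel's raw pointer cast):

#include <assert.h>
#include <string.h>

enum ds_field {
	ds_buffer_base,
	ds_index,
	ds_absolute_maximum,
	ds_interrupt_threshold,
};

enum ds_qualifier { ds_bts, ds_pebs };

#define SIZEOF_FIELD	8	/* the 64bit configuration above */

static unsigned char ds_area[SIZEOF_FIELD * 12];

static void ds_set(unsigned char *base, enum ds_qualifier qual,
		   enum ds_field field, unsigned long value)
{
	base += SIZEOF_FIELD * (field + (4 * qual));
	memcpy(base, &value, sizeof(value));
}

static unsigned long ds_get(const unsigned char *base,
			    enum ds_qualifier qual, enum ds_field field)
{
	unsigned long value;

	memcpy(&value, base + SIZEOF_FIELD * (field + (4 * qual)),
	       sizeof(value));
	return value;
}

int main(void)
{
	ds_set(ds_area, ds_pebs, ds_index, 0x1000);

	/* the pebs index lands at offset 8 * (1 + 4 * 1) = 40 */
	assert(ds_get(ds_area, ds_pebs, ds_index) == 0x1000);
	return 0;
}
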
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
new file mode 100644
index 000000000000..1a78180f08d3
--- /dev/null
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -0,0 +1,449 @@
1/*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4 */
5#include <linux/kallsyms.h>
6#include <linux/kprobes.h>
7#include <linux/uaccess.h>
8#include <linux/utsname.h>
9#include <linux/hardirq.h>
10#include <linux/kdebug.h>
11#include <linux/module.h>
12#include <linux/ptrace.h>
13#include <linux/kexec.h>
14#include <linux/bug.h>
15#include <linux/nmi.h>
16#include <linux/sysfs.h>
17
18#include <asm/stacktrace.h>
19
20#define STACKSLOTS_PER_LINE 8
21#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)
22
23int panic_on_unrecovered_nmi;
24int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
25static unsigned int code_bytes = 64;
26static int die_counter;
27
28void printk_address(unsigned long address, int reliable)
29{
30 printk(" [<%p>] %s%pS\n", (void *) address,
31 reliable ? "" : "? ", (void *) address);
32}
33
34static inline int valid_stack_ptr(struct thread_info *tinfo,
35 void *p, unsigned int size, void *end)
36{
37 void *t = tinfo;
38 if (end) {
39 if (p < end && p >= (end-THREAD_SIZE))
40 return 1;
41 else
42 return 0;
43 }
44 return p > t && p < t + THREAD_SIZE - size;
45}
46
47/* The form of the top of the frame on the stack */
48struct stack_frame {
49 struct stack_frame *next_frame;
50 unsigned long return_address;
51};
52
53static inline unsigned long
54print_context_stack(struct thread_info *tinfo,
55 unsigned long *stack, unsigned long bp,
56 const struct stacktrace_ops *ops, void *data,
57 unsigned long *end)
58{
59 struct stack_frame *frame = (struct stack_frame *)bp;
60
61 while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
62 unsigned long addr;
63
64 addr = *stack;
65 if (__kernel_text_address(addr)) {
66 if ((unsigned long) stack == bp + sizeof(long)) {
67 ops->address(data, addr, 1);
68 frame = frame->next_frame;
69 bp = (unsigned long) frame;
70 } else {
71 ops->address(data, addr, bp == 0);
72 }
73 }
74 stack++;
75 }
76 return bp;
77}
78
79void dump_trace(struct task_struct *task, struct pt_regs *regs,
80 unsigned long *stack, unsigned long bp,
81 const struct stacktrace_ops *ops, void *data)
82{
83 if (!task)
84 task = current;
85
86 if (!stack) {
87 unsigned long dummy;
88 stack = &dummy;
89 if (task && task != current)
90 stack = (unsigned long *)task->thread.sp;
91 }
92
93#ifdef CONFIG_FRAME_POINTER
94 if (!bp) {
95 if (task == current) {
96 /* Grab bp right from our regs */
97 get_bp(bp);
98 } else {
99 /* bp is the last reg pushed by switch_to */
100 bp = *(unsigned long *) task->thread.sp;
101 }
102 }
103#endif
104
105 for (;;) {
106 struct thread_info *context;
107
108 context = (struct thread_info *)
109 ((unsigned long)stack & (~(THREAD_SIZE - 1)));
110 bp = print_context_stack(context, stack, bp, ops, data, NULL);
111
112 stack = (unsigned long *)context->previous_esp;
113 if (!stack)
114 break;
115 if (ops->stack(data, "IRQ") < 0)
116 break;
117 touch_nmi_watchdog();
118 }
119}
120EXPORT_SYMBOL(dump_trace);
121
122static void
123print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
124{
125 printk(data);
126 print_symbol(msg, symbol);
127 printk("\n");
128}
129
130static void print_trace_warning(void *data, char *msg)
131{
132 printk("%s%s\n", (char *)data, msg);
133}
134
135static int print_trace_stack(void *data, char *name)
136{
137 printk("%s <%s> ", (char *)data, name);
138 return 0;
139}
140
141/*
 142 * Print one address/symbol entry per line.
143 */
144static void print_trace_address(void *data, unsigned long addr, int reliable)
145{
146 touch_nmi_watchdog();
147 printk(data);
148 printk_address(addr, reliable);
149}
150
151static const struct stacktrace_ops print_trace_ops = {
152 .warning = print_trace_warning,
153 .warning_symbol = print_trace_warning_symbol,
154 .stack = print_trace_stack,
155 .address = print_trace_address,
156};
157
158static void
159show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
160 unsigned long *stack, unsigned long bp, char *log_lvl)
161{
162 printk("%sCall Trace:\n", log_lvl);
163 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
164}
165
166void show_trace(struct task_struct *task, struct pt_regs *regs,
167 unsigned long *stack, unsigned long bp)
168{
169 show_trace_log_lvl(task, regs, stack, bp, "");
170}
171
172static void
173show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
174 unsigned long *sp, unsigned long bp, char *log_lvl)
175{
176 unsigned long *stack;
177 int i;
178
179 if (sp == NULL) {
180 if (task)
181 sp = (unsigned long *)task->thread.sp;
182 else
183 sp = (unsigned long *)&sp;
184 }
185
186 stack = sp;
187 for (i = 0; i < kstack_depth_to_print; i++) {
188 if (kstack_end(stack))
189 break;
190 if (i && ((i % STACKSLOTS_PER_LINE) == 0))
191 printk("\n%s", log_lvl);
192 printk(" %08lx", *stack++);
193 touch_nmi_watchdog();
194 }
195 printk("\n");
196 show_trace_log_lvl(task, regs, sp, bp, log_lvl);
197}
198
199void show_stack(struct task_struct *task, unsigned long *sp)
200{
201 show_stack_log_lvl(task, NULL, sp, 0, "");
202}
203
204/*
205 * The architecture-independent dump_stack generator
206 */
207void dump_stack(void)
208{
209 unsigned long bp = 0;
210 unsigned long stack;
211
212#ifdef CONFIG_FRAME_POINTER
213 if (!bp)
214 get_bp(bp);
215#endif
216
217 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
218 current->pid, current->comm, print_tainted(),
219 init_utsname()->release,
220 (int)strcspn(init_utsname()->version, " "),
221 init_utsname()->version);
222 show_trace(NULL, NULL, &stack, bp);
223}
224
225EXPORT_SYMBOL(dump_stack);
226
227void show_registers(struct pt_regs *regs)
228{
229 int i;
230
231 print_modules();
232 __show_regs(regs, 0);
233
234 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
235 TASK_COMM_LEN, current->comm, task_pid_nr(current),
236 current_thread_info(), current, task_thread_info(current));
237 /*
238 * When in-kernel, we also print out the stack and code at the
 239 * time of the fault.
240 */
241 if (!user_mode_vm(regs)) {
242 unsigned int code_prologue = code_bytes * 43 / 64;
243 unsigned int code_len = code_bytes;
244 unsigned char c;
245 u8 *ip;
246
247 printk(KERN_EMERG "Stack:\n");
248 show_stack_log_lvl(NULL, regs, &regs->sp,
249 0, KERN_EMERG);
250
251 printk(KERN_EMERG "Code: ");
252
253 ip = (u8 *)regs->ip - code_prologue;
254 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
255 /* try starting at IP */
256 ip = (u8 *)regs->ip;
257 code_len = code_len - code_prologue + 1;
258 }
259 for (i = 0; i < code_len; i++, ip++) {
260 if (ip < (u8 *)PAGE_OFFSET ||
261 probe_kernel_address(ip, c)) {
262 printk(" Bad EIP value.");
263 break;
264 }
265 if (ip == (u8 *)regs->ip)
266 printk("<%02x> ", c);
267 else
268 printk("%02x ", c);
269 }
270 }
271 printk("\n");
272}
273
274int is_valid_bugaddr(unsigned long ip)
275{
276 unsigned short ud2;
277
278 if (ip < PAGE_OFFSET)
279 return 0;
280 if (probe_kernel_address((unsigned short *)ip, ud2))
281 return 0;
282
283 return ud2 == 0x0b0f;
284}
285
286static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
287static int die_owner = -1;
288static unsigned int die_nest_count;
289
290unsigned __kprobes long oops_begin(void)
291{
292 unsigned long flags;
293
294 oops_enter();
295
296 if (die_owner != raw_smp_processor_id()) {
297 console_verbose();
298 raw_local_irq_save(flags);
299 __raw_spin_lock(&die_lock);
300 die_owner = smp_processor_id();
301 die_nest_count = 0;
302 bust_spinlocks(1);
303 } else {
304 raw_local_irq_save(flags);
305 }
306 die_nest_count++;
307 return flags;
308}
309
310void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
311{
312 bust_spinlocks(0);
313 die_owner = -1;
314 add_taint(TAINT_DIE);
315 __raw_spin_unlock(&die_lock);
316 raw_local_irq_restore(flags);
317
318 if (!regs)
319 return;
320
321 if (kexec_should_crash(current))
322 crash_kexec(regs);
323 if (in_interrupt())
324 panic("Fatal exception in interrupt");
325 if (panic_on_oops)
326 panic("Fatal exception");
327 oops_exit();
328 do_exit(signr);
329}
330
331int __kprobes __die(const char *str, struct pt_regs *regs, long err)
332{
333 unsigned short ss;
334 unsigned long sp;
335
336 printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
337#ifdef CONFIG_PREEMPT
338 printk("PREEMPT ");
339#endif
340#ifdef CONFIG_SMP
341 printk("SMP ");
342#endif
343#ifdef CONFIG_DEBUG_PAGEALLOC
344 printk("DEBUG_PAGEALLOC");
345#endif
346 printk("\n");
347 sysfs_printk_last_file();
348 if (notify_die(DIE_OOPS, str, regs, err,
349 current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
350 return 1;
351
352 show_registers(regs);
353 /* Executive summary in case the oops scrolled away */
354 sp = (unsigned long) (&regs->sp);
355 savesegment(ss, ss);
356 if (user_mode(regs)) {
357 sp = regs->sp;
358 ss = regs->ss & 0xffff;
359 }
360 printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
361 print_symbol("%s", regs->ip);
362 printk(" SS:ESP %04x:%08lx\n", ss, sp);
363 return 0;
364}
365
366/*
367 * This is gone through when something in the kernel has done something bad
368 * and is about to be terminated:
369 */
370void die(const char *str, struct pt_regs *regs, long err)
371{
372 unsigned long flags = oops_begin();
373
374 if (die_nest_count < 3) {
375 report_bug(regs->ip, regs);
376
377 if (__die(str, regs, err))
378 regs = NULL;
379 } else {
380 printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
381 }
382
383 oops_end(flags, regs, SIGSEGV);
384}
385
386static DEFINE_SPINLOCK(nmi_print_lock);
387
388void notrace __kprobes
389die_nmi(char *str, struct pt_regs *regs, int do_panic)
390{
391 if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
392 return;
393
394 spin_lock(&nmi_print_lock);
395 /*
396	 * We are in trouble anyway, let's at least try
397 * to get a message out:
398 */
399 bust_spinlocks(1);
400 printk(KERN_EMERG "%s", str);
401 printk(" on CPU%d, ip %08lx, registers:\n",
402 smp_processor_id(), regs->ip);
403 show_registers(regs);
404 if (do_panic)
405		panic("Non-maskable interrupt");
406 console_silent();
407 spin_unlock(&nmi_print_lock);
408 bust_spinlocks(0);
409
410 /*
411	 * If we are in kernel we are probably nested up pretty badly
412	 * and might as well get out now while we still can:
413 */
414 if (!user_mode_vm(regs)) {
415 current->thread.trap_no = 2;
416 crash_kexec(regs);
417 }
418
419 do_exit(SIGSEGV);
420}
421
422static int __init oops_setup(char *s)
423{
424 if (!s)
425 return -EINVAL;
426 if (!strcmp(s, "panic"))
427 panic_on_oops = 1;
428 return 0;
429}
430early_param("oops", oops_setup);
431
432static int __init kstack_setup(char *s)
433{
434 if (!s)
435 return -EINVAL;
436 kstack_depth_to_print = simple_strtoul(s, NULL, 0);
437 return 0;
438}
439early_param("kstack", kstack_setup);
440
441static int __init code_bytes_setup(char *s)
442{
443 code_bytes = simple_strtoul(s, NULL, 0);
444 if (code_bytes > 8192)
445 code_bytes = 8192;
446
447 return 1;
448}
449__setup("code_bytes=", code_bytes_setup);
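
dump_trace() above is exported precisely so other kernel code can walk a task's stack with its own callbacks. A minimal sketch of such a consumer, using only the stacktrace_ops interface shown in this file (the frame-counting purpose and all names here are hypothetical):

/* Hypothetical dump_trace() consumer: count frames found via the bp chain. */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/stacktrace.h>

static void count_warning(void *data, char *msg) { }
static void count_warning_symbol(void *data, char *msg, unsigned long sym) { }

static int count_stack(void *data, char *name)
{
	return 0;		/* keep descending into IRQ/exception stacks */
}

static void count_address(void *data, unsigned long addr, int reliable)
{
	if (reliable)		/* reached through a valid frame pointer */
		(*(unsigned int *)data)++;
}

static const struct stacktrace_ops count_ops = {
	.warning	= count_warning,
	.warning_symbol	= count_warning_symbol,
	.stack		= count_stack,
	.address	= count_address,
};

static unsigned int count_reliable_frames(void)
{
	unsigned int n = 0;

	/* NULL stack/regs and bp == 0 mean: start from the current context */
	dump_trace(current, NULL, NULL, 0, &count_ops, &n);
	return n;
}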
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
new file mode 100644
index 000000000000..96a5db7da8a7
--- /dev/null
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -0,0 +1,575 @@
1/*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4 */
5#include <linux/kallsyms.h>
6#include <linux/kprobes.h>
7#include <linux/uaccess.h>
8#include <linux/utsname.h>
9#include <linux/hardirq.h>
10#include <linux/kdebug.h>
11#include <linux/module.h>
12#include <linux/ptrace.h>
13#include <linux/kexec.h>
14#include <linux/bug.h>
15#include <linux/nmi.h>
16#include <linux/sysfs.h>
17
18#include <asm/stacktrace.h>
19
20#define STACKSLOTS_PER_LINE 4
21#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
22
23int panic_on_unrecovered_nmi;
24int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
25static unsigned int code_bytes = 64;
26static int die_counter;
27
28void printk_address(unsigned long address, int reliable)
29{
30 printk(" [<%p>] %s%pS\n", (void *) address,
31 reliable ? "" : "? ", (void *) address);
32}
33
34static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
35 unsigned *usedp, char **idp)
36{
37 static char ids[][8] = {
38 [DEBUG_STACK - 1] = "#DB",
39 [NMI_STACK - 1] = "NMI",
40 [DOUBLEFAULT_STACK - 1] = "#DF",
41 [STACKFAULT_STACK - 1] = "#SS",
42 [MCE_STACK - 1] = "#MC",
43#if DEBUG_STKSZ > EXCEPTION_STKSZ
44 [N_EXCEPTION_STACKS ...
45 N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
46#endif
47 };
48 unsigned k;
49
50 /*
51 * Iterate over all exception stacks, and figure out whether
52 * 'stack' is in one of them:
53 */
54 for (k = 0; k < N_EXCEPTION_STACKS; k++) {
55 unsigned long end = per_cpu(orig_ist, cpu).ist[k];
56 /*
57 * Is 'stack' above this exception frame's end?
58 * If yes then skip to the next frame.
59 */
60 if (stack >= end)
61 continue;
62 /*
63 * Is 'stack' above this exception frame's start address?
64 * If yes then we found the right frame.
65 */
66 if (stack >= end - EXCEPTION_STKSZ) {
67 /*
68 * Make sure we only iterate through an exception
69 * stack once. If it comes up for the second time
70 * then there's something wrong going on - just
71 * break out and return NULL:
72 */
73 if (*usedp & (1U << k))
74 break;
75 *usedp |= 1U << k;
76 *idp = ids[k];
77 return (unsigned long *)end;
78 }
79 /*
80 * If this is a debug stack, and if it has a larger size than
81 * the usual exception stacks, then 'stack' might still
82 * be within the lower portion of the debug stack:
83 */
84#if DEBUG_STKSZ > EXCEPTION_STKSZ
85 if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
86 unsigned j = N_EXCEPTION_STACKS - 1;
87
88 /*
89 * Black magic. A large debug stack is composed of
90 * multiple exception stack entries, which we
91		 * iterate through now. Don't look:
92 */
93 do {
94 ++j;
95 end -= EXCEPTION_STKSZ;
96 ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
97 } while (stack < end - EXCEPTION_STKSZ);
98 if (*usedp & (1U << j))
99 break;
100 *usedp |= 1U << j;
101 *idp = ids[j];
102 return (unsigned long *)end;
103 }
104#endif
105 }
106 return NULL;
107}
108
109/*
110 * x86-64 can have up to three kernel stacks:
111 * process stack
112 * interrupt stack
113 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
114 */
115
116static inline int valid_stack_ptr(struct thread_info *tinfo,
117 void *p, unsigned int size, void *end)
118{
119 void *t = tinfo;
120 if (end) {
121 if (p < end && p >= (end-THREAD_SIZE))
122 return 1;
123 else
124 return 0;
125 }
126 return p > t && p < t + THREAD_SIZE - size;
127}
128
129/* The form of the top of the frame on the stack */
130struct stack_frame {
131 struct stack_frame *next_frame;
132 unsigned long return_address;
133};
134
135static inline unsigned long
136print_context_stack(struct thread_info *tinfo,
137 unsigned long *stack, unsigned long bp,
138 const struct stacktrace_ops *ops, void *data,
139 unsigned long *end)
140{
141 struct stack_frame *frame = (struct stack_frame *)bp;
142
143 while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
144 unsigned long addr;
145
146 addr = *stack;
147 if (__kernel_text_address(addr)) {
148 if ((unsigned long) stack == bp + sizeof(long)) {
149 ops->address(data, addr, 1);
150 frame = frame->next_frame;
151 bp = (unsigned long) frame;
152 } else {
153 ops->address(data, addr, bp == 0);
154 }
155 }
156 stack++;
157 }
158 return bp;
159}
160
161void dump_trace(struct task_struct *task, struct pt_regs *regs,
162 unsigned long *stack, unsigned long bp,
163 const struct stacktrace_ops *ops, void *data)
164{
165 const unsigned cpu = get_cpu();
166 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
167 unsigned used = 0;
168 struct thread_info *tinfo;
169
170 if (!task)
171 task = current;
172
173 if (!stack) {
174 unsigned long dummy;
175 stack = &dummy;
176 if (task && task != current)
177 stack = (unsigned long *)task->thread.sp;
178 }
179
180#ifdef CONFIG_FRAME_POINTER
181 if (!bp) {
182 if (task == current) {
183 /* Grab bp right from our regs */
184 get_bp(bp);
185 } else {
186 /* bp is the last reg pushed by switch_to */
187 bp = *(unsigned long *) task->thread.sp;
188 }
189 }
190#endif
191
192 /*
193 * Print function call entries in all stacks, starting at the
194 * current stack address. If the stacks consist of nested
195	 * exceptions, each one is walked in turn:
196 */
197 tinfo = task_thread_info(task);
198 for (;;) {
199 char *id;
200 unsigned long *estack_end;
201 estack_end = in_exception_stack(cpu, (unsigned long)stack,
202 &used, &id);
203
204 if (estack_end) {
205 if (ops->stack(data, id) < 0)
206 break;
207
208 bp = print_context_stack(tinfo, stack, bp, ops,
209 data, estack_end);
210 ops->stack(data, "<EOE>");
211 /*
212 * We link to the next stack via the
213 * second-to-last pointer (index -2 to end) in the
214 * exception stack:
215 */
216 stack = (unsigned long *) estack_end[-2];
217 continue;
218 }
219 if (irqstack_end) {
220 unsigned long *irqstack;
221 irqstack = irqstack_end -
222 (IRQSTACKSIZE - 64) / sizeof(*irqstack);
223
224 if (stack >= irqstack && stack < irqstack_end) {
225 if (ops->stack(data, "IRQ") < 0)
226 break;
227 bp = print_context_stack(tinfo, stack, bp,
228 ops, data, irqstack_end);
229 /*
230 * We link to the next stack (which would be
231		 * the process stack normally) via the last
232 * pointer (index -1 to end) in the IRQ stack:
233 */
234 stack = (unsigned long *) (irqstack_end[-1]);
235 irqstack_end = NULL;
236 ops->stack(data, "EOI");
237 continue;
238 }
239 }
240 break;
241 }
242
243 /*
244 * This handles the process stack:
245 */
246 bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
247 put_cpu();
248}
249EXPORT_SYMBOL(dump_trace);
250
251static void
252print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
253{
254 printk(data);
255 print_symbol(msg, symbol);
256 printk("\n");
257}
258
259static void print_trace_warning(void *data, char *msg)
260{
261 printk("%s%s\n", (char *)data, msg);
262}
263
264static int print_trace_stack(void *data, char *name)
265{
266 printk("%s <%s> ", (char *)data, name);
267 return 0;
268}
269
270/*
271 * Print one address/symbol entry per line.
272 */
273static void print_trace_address(void *data, unsigned long addr, int reliable)
274{
275 touch_nmi_watchdog();
276 printk(data);
277 printk_address(addr, reliable);
278}
279
280static const struct stacktrace_ops print_trace_ops = {
281 .warning = print_trace_warning,
282 .warning_symbol = print_trace_warning_symbol,
283 .stack = print_trace_stack,
284 .address = print_trace_address,
285};
286
287static void
288show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
289 unsigned long *stack, unsigned long bp, char *log_lvl)
290{
291 printk("%sCall Trace:\n", log_lvl);
292 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
293}
294
295void show_trace(struct task_struct *task, struct pt_regs *regs,
296 unsigned long *stack, unsigned long bp)
297{
298 show_trace_log_lvl(task, regs, stack, bp, "");
299}
300
301static void
302show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
303 unsigned long *sp, unsigned long bp, char *log_lvl)
304{
305 unsigned long *stack;
306 int i;
307 const int cpu = smp_processor_id();
308 unsigned long *irqstack_end =
309 (unsigned long *) (cpu_pda(cpu)->irqstackptr);
310 unsigned long *irqstack =
311 (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
312
313 /*
314 * debugging aid: "show_stack(NULL, NULL);" prints the
315	 * backtrace for this CPU.
316 */
317
318 if (sp == NULL) {
319 if (task)
320 sp = (unsigned long *)task->thread.sp;
321 else
322 sp = (unsigned long *)&sp;
323 }
324
325 stack = sp;
326 for (i = 0; i < kstack_depth_to_print; i++) {
327 if (stack >= irqstack && stack <= irqstack_end) {
328 if (stack == irqstack_end) {
329 stack = (unsigned long *) (irqstack_end[-1]);
330 printk(" <EOI> ");
331 }
332 } else {
333 if (((long) stack & (THREAD_SIZE-1)) == 0)
334 break;
335 }
336 if (i && ((i % STACKSLOTS_PER_LINE) == 0))
337 printk("\n%s", log_lvl);
338 printk(" %016lx", *stack++);
339 touch_nmi_watchdog();
340 }
341 printk("\n");
342 show_trace_log_lvl(task, regs, sp, bp, log_lvl);
343}
344
345void show_stack(struct task_struct *task, unsigned long *sp)
346{
347 show_stack_log_lvl(task, NULL, sp, 0, "");
348}
349
350/*
351 * The architecture-independent dump_stack generator
352 */
353void dump_stack(void)
354{
355 unsigned long bp = 0;
356 unsigned long stack;
357
358#ifdef CONFIG_FRAME_POINTER
359 if (!bp)
360 get_bp(bp);
361#endif
362
363 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
364 current->pid, current->comm, print_tainted(),
365 init_utsname()->release,
366 (int)strcspn(init_utsname()->version, " "),
367 init_utsname()->version);
368 show_trace(NULL, NULL, &stack, bp);
369}
370EXPORT_SYMBOL(dump_stack);
371
372void show_registers(struct pt_regs *regs)
373{
374 int i;
375 unsigned long sp;
376 const int cpu = smp_processor_id();
377 struct task_struct *cur = cpu_pda(cpu)->pcurrent;
378
379 sp = regs->sp;
380 printk("CPU %d ", cpu);
381 __show_regs(regs, 1);
382 printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
383 cur->comm, cur->pid, task_thread_info(cur), cur);
384
385 /*
386 * When in-kernel, we also print out the stack and code at the
387	 * time of the fault.
388 */
389 if (!user_mode(regs)) {
390 unsigned int code_prologue = code_bytes * 43 / 64;
391 unsigned int code_len = code_bytes;
392 unsigned char c;
393 u8 *ip;
394
395 printk(KERN_EMERG "Stack:\n");
396 show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
397 regs->bp, KERN_EMERG);
398
399 printk(KERN_EMERG "Code: ");
400
401 ip = (u8 *)regs->ip - code_prologue;
402 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
403 /* try starting at IP */
404 ip = (u8 *)regs->ip;
405 code_len = code_len - code_prologue + 1;
406 }
407 for (i = 0; i < code_len; i++, ip++) {
408 if (ip < (u8 *)PAGE_OFFSET ||
409 probe_kernel_address(ip, c)) {
410 printk(" Bad RIP value.");
411 break;
412 }
413 if (ip == (u8 *)regs->ip)
414 printk("<%02x> ", c);
415 else
416 printk("%02x ", c);
417 }
418 }
419 printk("\n");
420}
421
422int is_valid_bugaddr(unsigned long ip)
423{
424 unsigned short ud2;
425
426 if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
427 return 0;
428
429 return ud2 == 0x0b0f;
430}
431
432static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
433static int die_owner = -1;
434static unsigned int die_nest_count;
435
436unsigned __kprobes long oops_begin(void)
437{
438 int cpu;
439 unsigned long flags;
440
441 oops_enter();
442
443 /* racy, but better than risking deadlock. */
444 raw_local_irq_save(flags);
445 cpu = smp_processor_id();
446 if (!__raw_spin_trylock(&die_lock)) {
447 if (cpu == die_owner)
448 /* nested oops. should stop eventually */;
449 else
450 __raw_spin_lock(&die_lock);
451 }
452 die_nest_count++;
453 die_owner = cpu;
454 console_verbose();
455 bust_spinlocks(1);
456 return flags;
457}
458
459void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
460{
461 die_owner = -1;
462 bust_spinlocks(0);
463 die_nest_count--;
464 if (!die_nest_count)
465 /* Nest count reaches zero, release the lock. */
466 __raw_spin_unlock(&die_lock);
467 raw_local_irq_restore(flags);
468 if (!regs) {
469 oops_exit();
470 return;
471 }
472 if (in_interrupt())
473 panic("Fatal exception in interrupt");
474 if (panic_on_oops)
475 panic("Fatal exception");
476 oops_exit();
477 do_exit(signr);
478}
479
480int __kprobes __die(const char *str, struct pt_regs *regs, long err)
481{
482 printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
483#ifdef CONFIG_PREEMPT
484 printk("PREEMPT ");
485#endif
486#ifdef CONFIG_SMP
487 printk("SMP ");
488#endif
489#ifdef CONFIG_DEBUG_PAGEALLOC
490 printk("DEBUG_PAGEALLOC");
491#endif
492 printk("\n");
493 sysfs_printk_last_file();
494 if (notify_die(DIE_OOPS, str, regs, err,
495 current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
496 return 1;
497
498 show_registers(regs);
499 add_taint(TAINT_DIE);
500 /* Executive summary in case the oops scrolled away */
501 printk(KERN_ALERT "RIP ");
502 printk_address(regs->ip, 1);
503 printk(" RSP <%016lx>\n", regs->sp);
504 if (kexec_should_crash(current))
505 crash_kexec(regs);
506 return 0;
507}
508
509void die(const char *str, struct pt_regs *regs, long err)
510{
511 unsigned long flags = oops_begin();
512
513 if (!user_mode(regs))
514 report_bug(regs->ip, regs);
515
516 if (__die(str, regs, err))
517 regs = NULL;
518 oops_end(flags, regs, SIGSEGV);
519}
520
521notrace __kprobes void
522die_nmi(char *str, struct pt_regs *regs, int do_panic)
523{
524 unsigned long flags;
525
526 if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
527 return;
528
529 flags = oops_begin();
530 /*
531	 * We are in trouble anyway, let's at least try
532 * to get a message out.
533 */
534 printk(KERN_EMERG "%s", str);
535 printk(" on CPU%d, ip %08lx, registers:\n",
536 smp_processor_id(), regs->ip);
537 show_registers(regs);
538 if (kexec_should_crash(current))
539 crash_kexec(regs);
540 if (do_panic || panic_on_oops)
541		panic("Non-maskable interrupt");
542 oops_end(flags, NULL, SIGBUS);
543 nmi_exit();
544 local_irq_enable();
545 do_exit(SIGBUS);
546}
547
548static int __init oops_setup(char *s)
549{
550 if (!s)
551 return -EINVAL;
552 if (!strcmp(s, "panic"))
553 panic_on_oops = 1;
554 return 0;
555}
556early_param("oops", oops_setup);
557
558static int __init kstack_setup(char *s)
559{
560 if (!s)
561 return -EINVAL;
562 kstack_depth_to_print = simple_strtoul(s, NULL, 0);
563 return 0;
564}
565early_param("kstack", kstack_setup);
566
567static int __init code_bytes_setup(char *s)
568{
569 code_bytes = simple_strtoul(s, NULL, 0);
570 if (code_bytes > 8192)
571 code_bytes = 8192;
572
573 return 1;
574}
575__setup("code_bytes=", code_bytes_setup);
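
print_context_stack() above treats an address as reliable only when it is reached through the frame-pointer chain, where each frame starts with the saved caller %rbp followed by the return address. A user-space analogue of that traversal, assuming x86-64 code built with -fno-omit-frame-pointer (illustrative only; a hard depth cap stands in for the kernel's bounds checks):

/* Illustrative x86-64 frame-pointer walk; assumes every frame is laid
 * out as [saved rbp][return address], i.e. -fno-omit-frame-pointer. */
#include <stdio.h>

struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

static void walk_frames(void)
{
	struct stack_frame *frame;
	int depth;

	asm("movq %%rbp, %0" : "=r" (frame));	/* same trick as get_bp() */
	for (depth = 0; frame && depth < 16; depth++) {
		printf("return address: %#lx\n", frame->return_address);
		frame = frame->next_frame;	/* up to the caller's frame */
	}
}

int main(void)
{
	walk_frames();
	return 0;
}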
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 66e48aa2dd1b..ce97bf3bed12 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -148,6 +148,9 @@ void __init e820_print_map(char *who)
 		case E820_NVS:
 			printk(KERN_CONT "(ACPI NVS)\n");
 			break;
+		case E820_UNUSABLE:
+			printk("(unusable)\n");
+			break;
 		default:
 			printk(KERN_CONT "type %u\n", e820.map[i].type);
 			break;
@@ -1260,6 +1263,7 @@ static inline const char *e820_type_to_string(int e820_type)
 	case E820_RAM:	return "System RAM";
 	case E820_ACPI:	return "ACPI Tables";
 	case E820_NVS:	return "ACPI Non-volatile Storage";
+	case E820_UNUSABLE:	return "Unusable memory";
 	default:	return "reserved";
 	}
 }
@@ -1267,6 +1271,7 @@ static inline const char *e820_type_to_string(int e820_type)
 /*
  * Mark e820 reserved areas as busy for the resource manager.
  */
+static struct resource __initdata *e820_res;
 void __init e820_reserve_resources(void)
 {
 	int i;
@@ -1274,20 +1279,26 @@ void __init e820_reserve_resources(void)
 	u64 end;
 
 	res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map);
+	e820_res = res;
 	for (i = 0; i < e820.nr_map; i++) {
 		end = e820.map[i].addr + e820.map[i].size - 1;
-#ifndef CONFIG_RESOURCES_64BIT
-		if (end > 0x100000000ULL) {
+		if (end != (resource_size_t)end) {
 			res++;
 			continue;
 		}
-#endif
 		res->name = e820_type_to_string(e820.map[i].type);
 		res->start = e820.map[i].addr;
 		res->end = end;
 
 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-		insert_resource(&iomem_resource, res);
+
+		/*
+		 * Don't register regions that could conflict with a PCI
+		 * device's BAR resources; those are inserted later, in
+		 * pcibios_resource_survey().
+		 */
+		if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20))
+			insert_resource(&iomem_resource, res);
 		res++;
 	}
 
@@ -1299,6 +1310,19 @@ void __init e820_reserve_resources(void)
 	}
 }
 
+void __init e820_reserve_resources_late(void)
+{
+	int i;
+	struct resource *res;
+
+	res = e820_res;
+	for (i = 0; i < e820.nr_map; i++) {
+		if (!res->parent && res->end)
+			reserve_region_with_split(&iomem_resource, res->start, res->end, res->name);
+		res++;
+	}
+}
+
 char *__init default_machine_specific_memory_setup(void)
 {
 	char *who = "BIOS-e820";
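
The second hunk above replaces the CONFIG_RESOURCES_64BIT #ifdef with a generic truncation test: if a 64-bit end address does not survive a round-trip through resource_size_t, the region cannot be represented and is skipped. A standalone illustration of that idiom, with an invented 32-bit stand-in for resource_size_t:

/* Demonstrates the end != (resource_size_t)end truncation test. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t demo_resource_size_t;	/* stand-in for a 32-bit resource_size_t */

int main(void)
{
	uint64_t ends[] = { 0xffffffffULL, 0x100000000ULL };
	int i;

	for (i = 0; i < 2; i++) {
		if (ends[i] != (demo_resource_size_t)ends[i])
			printf("%#llx does not fit, skip\n",
			       (unsigned long long)ends[i]);
		else
			printf("%#llx fits\n", (unsigned long long)ends[i]);
	}
	return 0;
}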
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 4353cf5e6fac..733c4f8d42ea 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -95,6 +95,66 @@ static void __init nvidia_bugs(int num, int slot, int func)
 
 }
 
+static u32 ati_ixp4x0_rev(int num, int slot, int func)
+{
+	u32 d;
+	u8 b;
+
+	b = read_pci_config_byte(num, slot, func, 0xac);
+	b &= ~(1<<5);
+	write_pci_config_byte(num, slot, func, 0xac, b);
+
+	d = read_pci_config(num, slot, func, 0x70);
+	d |= 1<<8;
+	write_pci_config(num, slot, func, 0x70, d);
+
+	d = read_pci_config(num, slot, func, 0x8);
+	d &= 0xff;
+	return d;
+}
+
+static void __init ati_bugs(int num, int slot, int func)
+{
+#if defined(CONFIG_ACPI) && defined (CONFIG_X86_IO_APIC)
+	u32 d;
+	u8 b;
+
+	if (acpi_use_timer_override)
+		return;
+
+	d = ati_ixp4x0_rev(num, slot, func);
+	if (d < 0x82)
+		acpi_skip_timer_override = 1;
+	else {
+		/* check for IRQ0 interrupt swap */
+		outb(0x72, 0xcd6); b = inb(0xcd7);
+		if (!(b & 0x2))
+			acpi_skip_timer_override = 1;
+	}
+
+	if (acpi_skip_timer_override) {
+		printk(KERN_INFO "SB4X0 revision 0x%x\n", d);
+		printk(KERN_INFO "Ignoring ACPI timer override.\n");
+		printk(KERN_INFO "If you get timer trouble, "
+		       "try acpi_use_timer_override\n");
+	}
+#endif
+}
+
+#ifdef CONFIG_DMAR
+static void __init intel_g33_dmar(int num, int slot, int func)
+{
+	struct acpi_table_header *dmar_tbl;
+	acpi_status status;
+
+	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
+	if (ACPI_SUCCESS(status)) {
+		printk(KERN_INFO "BIOS BUG: DMAR advertised on Intel G31/G33 chipset -- ignoring\n");
+		dmar_disabled = 1;
+	}
+}
+#endif
+
 #define QFLAG_APPLY_ONCE	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -114,6 +174,12 @@ static struct chipset early_qrk[] __initdata = {
 		PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, via_bugs },
 	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
 		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, fix_hypertransport_config },
+	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
+		PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
+#ifdef CONFIG_DMAR
+	{ PCI_VENDOR_ID_INTEL, 0x29c0,
+		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, intel_g33_dmar },
+#endif
 	{}
 };
 
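
Each early_qrk[] entry pairs a PCI vendor/device/class match with a handler such as ati_bugs() or intel_g33_dmar(). The loop that consumes the table lives in check_dev_quirk(), outside this hunk; the sketch below approximates its shape from the entries above (the helper name and exact flow are reconstructed, not quoted from the file):

/* Approximate shape of the early_qrk[] matching walk; the real code is
 * check_dev_quirk()/early_quirks(), which this hunk does not show.
 * Class/class_mask matching is elided for brevity. */
static void run_early_quirks(int num, int slot, int func,
			     u16 vendor, u16 device)
{
	struct chipset *q;

	for (q = early_qrk; q->f; q++) {
		if (q->vendor != PCI_ANY_ID && q->vendor != vendor)
			continue;
		if (q->device != PCI_ANY_ID && q->device != device)
			continue;
		if (q->flags & QFLAG_APPLIED)
			continue;	/* a QFLAG_APPLY_ONCE quirk already ran */
		q->f(num, slot, func);
		if (q->flags & QFLAG_APPLY_ONCE)
			q->flags |= QFLAG_APPLIED;
	}
}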
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index ff9e7350da54..34ad997d3834 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -3,11 +3,19 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/screen_info.h>
+#include <linux/usb/ch9.h>
+#include <linux/pci_regs.h>
+#include <linux/pci_ids.h>
+#include <linux/errno.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/fcntl.h>
 #include <asm/setup.h>
 #include <xen/hvc-console.h>
+#include <asm/pci-direct.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+#include <linux/usb/ehci_def.h>
 
 /* Simple VGA output */
 #define VGABASE		(__ISA_IO_base + 0xb8000)
@@ -78,6 +86,7 @@ static int early_serial_base = 0x3f8; /* ttyS0 */
 static int early_serial_putc(unsigned char ch)
 {
 	unsigned timeout = 0xffff;
+
 	while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
 		cpu_relax();
 	outb(ch, early_serial_base + TXR);
@@ -111,7 +120,7 @@ static __init void early_serial_init(char *s)
 	if (!strncmp(s, "0x", 2)) {
 		early_serial_base = simple_strtoul(s, &e, 16);
 	} else {
-		static int bases[] = { 0x3f8, 0x2f8 };
+		static const int __initconst bases[] = { 0x3f8, 0x2f8 };
 
 		if (!strncmp(s, "ttyS", 4))
 			s += 4;
@@ -151,6 +160,721 @@ static struct console early_serial_console = {
 	.index =	-1,
 };
 
163#ifdef CONFIG_EARLY_PRINTK_DBGP
164
165static struct ehci_caps __iomem *ehci_caps;
166static struct ehci_regs __iomem *ehci_regs;
167static struct ehci_dbg_port __iomem *ehci_debug;
168static unsigned int dbgp_endpoint_out;
169
170struct ehci_dev {
171 u32 bus;
172 u32 slot;
173 u32 func;
174};
175
176static struct ehci_dev ehci_dev;
177
178#define USB_DEBUG_DEVNUM 127
179
180#define DBGP_DATA_TOGGLE 0x8800
181
182static inline u32 dbgp_pid_update(u32 x, u32 tok)
183{
184 return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff);
185}
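/* Note: USB_PID_DATA0 (0xc3) ^ USB_PID_DATA1 (0x4b) == 0x88, so xoring
 * the PIDS value with DBGP_DATA_TOGGLE (0x88 << 8) flips the send PID
 * between DATA0 and DATA1 while the low byte takes the new token PID. */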
186
187static inline u32 dbgp_len_update(u32 x, u32 len)
188{
189 return (x & ~0x0f) | (len & 0x0f);
190}
191
192/*
193 * USB Packet IDs (PIDs)
194 */
195
196/* token */
197#define USB_PID_OUT 0xe1
198#define USB_PID_IN 0x69
199#define USB_PID_SOF 0xa5
200#define USB_PID_SETUP 0x2d
201/* handshake */
202#define USB_PID_ACK 0xd2
203#define USB_PID_NAK 0x5a
204#define USB_PID_STALL 0x1e
205#define USB_PID_NYET 0x96
206/* data */
207#define USB_PID_DATA0 0xc3
208#define USB_PID_DATA1 0x4b
209#define USB_PID_DATA2 0x87
210#define USB_PID_MDATA 0x0f
211/* Special */
212#define USB_PID_PREAMBLE 0x3c
213#define USB_PID_ERR 0x3c
214#define USB_PID_SPLIT 0x78
215#define USB_PID_PING 0xb4
216#define USB_PID_UNDEF_0 0xf0
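/* Note: each PID above is a 4-bit code stored with its one's complement
 * in the high nibble (e.g. OUT: 0xe1 = (~0x1 << 4) | 0x1), which is the
 * error check built into the USB PID byte format. */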
217
218#define USB_PID_DATA_TOGGLE 0x88
219#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE)
220
221#define PCI_CAP_ID_EHCI_DEBUG 0xa
222
223#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
224#define HUB_SHORT_RESET_TIME 10
225#define HUB_LONG_RESET_TIME 200
226#define HUB_RESET_TIMEOUT 500
227
228#define DBGP_MAX_PACKET 8
229
230static int dbgp_wait_until_complete(void)
231{
232 u32 ctrl;
233 int loop = 0x100000;
234
235 do {
236 ctrl = readl(&ehci_debug->control);
237 /* Stop when the transaction is finished */
238 if (ctrl & DBGP_DONE)
239 break;
240 } while (--loop > 0);
241
242 if (!loop)
243 return -1;
244
245 /*
246 * Now that we have observed the completed transaction,
247 * clear the done bit.
248 */
249 writel(ctrl | DBGP_DONE, &ehci_debug->control);
250 return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
251}
252
253static void dbgp_mdelay(int ms)
254{
255 int i;
256
257 while (ms--) {
258 for (i = 0; i < 1000; i++)
259 outb(0x1, 0x80);
260 }
261}
262
263static void dbgp_breath(void)
264{
265 /* Sleep to give the debug port a chance to breathe */
266}
267
268static int dbgp_wait_until_done(unsigned ctrl)
269{
270 u32 pids, lpid;
271 int ret;
272 int loop = 3;
273
274retry:
275 writel(ctrl | DBGP_GO, &ehci_debug->control);
276 ret = dbgp_wait_until_complete();
277 pids = readl(&ehci_debug->pids);
278 lpid = DBGP_PID_GET(pids);
279
280 if (ret < 0)
281 return ret;
282
283 /*
284	 * If the port is getting full or it has dropped data,
285	 * start pacing ourselves; not necessary, but it's friendly.
286 */
287 if ((lpid == USB_PID_NAK) || (lpid == USB_PID_NYET))
288 dbgp_breath();
289
290	/* If we get a NAK, reissue the transmission */
291 if (lpid == USB_PID_NAK) {
292 if (--loop > 0)
293 goto retry;
294 }
295
296 return ret;
297}
298
299static void dbgp_set_data(const void *buf, int size)
300{
301 const unsigned char *bytes = buf;
302 u32 lo, hi;
303 int i;
304
305 lo = hi = 0;
306 for (i = 0; i < 4 && i < size; i++)
307 lo |= bytes[i] << (8*i);
308 for (; i < 8 && i < size; i++)
309 hi |= bytes[i] << (8*(i - 4));
310 writel(lo, &ehci_debug->data03);
311 writel(hi, &ehci_debug->data47);
312}
313
314static void dbgp_get_data(void *buf, int size)
315{
316 unsigned char *bytes = buf;
317 u32 lo, hi;
318 int i;
319
320 lo = readl(&ehci_debug->data03);
321 hi = readl(&ehci_debug->data47);
322 for (i = 0; i < 4 && i < size; i++)
323 bytes[i] = (lo >> (8*i)) & 0xff;
324 for (; i < 8 && i < size; i++)
325 bytes[i] = (hi >> (8*(i - 4))) & 0xff;
326}
327
328static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
329 const char *bytes, int size)
330{
331 u32 pids, addr, ctrl;
332 int ret;
333
334 if (size > DBGP_MAX_PACKET)
335 return -1;
336
337 addr = DBGP_EPADDR(devnum, endpoint);
338
339 pids = readl(&ehci_debug->pids);
340 pids = dbgp_pid_update(pids, USB_PID_OUT);
341
342 ctrl = readl(&ehci_debug->control);
343 ctrl = dbgp_len_update(ctrl, size);
344 ctrl |= DBGP_OUT;
345 ctrl |= DBGP_GO;
346
347 dbgp_set_data(bytes, size);
348 writel(addr, &ehci_debug->address);
349 writel(pids, &ehci_debug->pids);
350
351 ret = dbgp_wait_until_done(ctrl);
352 if (ret < 0)
353 return ret;
354
355 return ret;
356}
357
358static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
359 int size)
360{
361 u32 pids, addr, ctrl;
362 int ret;
363
364 if (size > DBGP_MAX_PACKET)
365 return -1;
366
367 addr = DBGP_EPADDR(devnum, endpoint);
368
369 pids = readl(&ehci_debug->pids);
370 pids = dbgp_pid_update(pids, USB_PID_IN);
371
372 ctrl = readl(&ehci_debug->control);
373 ctrl = dbgp_len_update(ctrl, size);
374 ctrl &= ~DBGP_OUT;
375 ctrl |= DBGP_GO;
376
377 writel(addr, &ehci_debug->address);
378 writel(pids, &ehci_debug->pids);
379 ret = dbgp_wait_until_done(ctrl);
380 if (ret < 0)
381 return ret;
382
383 if (size > ret)
384 size = ret;
385 dbgp_get_data(data, size);
386 return ret;
387}
388
389static int dbgp_control_msg(unsigned devnum, int requesttype, int request,
390 int value, int index, void *data, int size)
391{
392 u32 pids, addr, ctrl;
393 struct usb_ctrlrequest req;
394 int read;
395 int ret;
396
397 read = (requesttype & USB_DIR_IN) != 0;
398 if (size > (read ? DBGP_MAX_PACKET:0))
399 return -1;
400
401 /* Compute the control message */
402 req.bRequestType = requesttype;
403 req.bRequest = request;
404 req.wValue = cpu_to_le16(value);
405 req.wIndex = cpu_to_le16(index);
406 req.wLength = cpu_to_le16(size);
407
408 pids = DBGP_PID_SET(USB_PID_DATA0, USB_PID_SETUP);
409 addr = DBGP_EPADDR(devnum, 0);
410
411 ctrl = readl(&ehci_debug->control);
412 ctrl = dbgp_len_update(ctrl, sizeof(req));
413 ctrl |= DBGP_OUT;
414 ctrl |= DBGP_GO;
415
416 /* Send the setup message */
417 dbgp_set_data(&req, sizeof(req));
418 writel(addr, &ehci_debug->address);
419 writel(pids, &ehci_debug->pids);
420 ret = dbgp_wait_until_done(ctrl);
421 if (ret < 0)
422 return ret;
423
424 /* Read the result */
425 return dbgp_bulk_read(devnum, 0, data, size);
426}
427
428
429/* Find a PCI capability */
430static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
431{
432 u8 pos;
433 int bytes;
434
435 if (!(read_pci_config_16(num, slot, func, PCI_STATUS) &
436 PCI_STATUS_CAP_LIST))
437 return 0;
438
439 pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST);
440 for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
441 u8 id;
442
443 pos &= ~3;
444 id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID);
445 if (id == 0xff)
446 break;
447 if (id == cap)
448 return pos;
449
450 pos = read_pci_config_byte(num, slot, func,
451 pos+PCI_CAP_LIST_NEXT);
452 }
453 return 0;
454}
455
456static u32 __init __find_dbgp(u32 bus, u32 slot, u32 func)
457{
458 u32 class;
459
460 class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);
461 if ((class >> 8) != PCI_CLASS_SERIAL_USB_EHCI)
462 return 0;
463
464 return find_cap(bus, slot, func, PCI_CAP_ID_EHCI_DEBUG);
465}
466
467static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
468{
469 u32 bus, slot, func;
470
471 for (bus = 0; bus < 256; bus++) {
472 for (slot = 0; slot < 32; slot++) {
473 for (func = 0; func < 8; func++) {
474 unsigned cap;
475
476 cap = __find_dbgp(bus, slot, func);
477
478 if (!cap)
479 continue;
480 if (ehci_num-- != 0)
481 continue;
482 *rbus = bus;
483 *rslot = slot;
484 *rfunc = func;
485 return cap;
486 }
487 }
488 }
489 return 0;
490}
491
492static int ehci_reset_port(int port)
493{
494 u32 portsc;
495 u32 delay_time, delay;
496 int loop;
497
498 /* Reset the usb debug port */
499 portsc = readl(&ehci_regs->port_status[port - 1]);
500 portsc &= ~PORT_PE;
501 portsc |= PORT_RESET;
502 writel(portsc, &ehci_regs->port_status[port - 1]);
503
504 delay = HUB_ROOT_RESET_TIME;
505 for (delay_time = 0; delay_time < HUB_RESET_TIMEOUT;
506 delay_time += delay) {
507 dbgp_mdelay(delay);
508
509 portsc = readl(&ehci_regs->port_status[port - 1]);
510 if (portsc & PORT_RESET) {
511 /* force reset to complete */
512 loop = 2;
513 writel(portsc & ~(PORT_RWC_BITS | PORT_RESET),
514 &ehci_regs->port_status[port - 1]);
515 do {
516 portsc = readl(&ehci_regs->port_status[port-1]);
517 } while ((portsc & PORT_RESET) && (--loop > 0));
518 }
519
520 /* Device went away? */
521 if (!(portsc & PORT_CONNECT))
522 return -ENOTCONN;
523
524		/* bomb out completely if something weird happened */
525 if ((portsc & PORT_CSC))
526 return -EINVAL;
527
528 /* If we've finished resetting, then break out of the loop */
529 if (!(portsc & PORT_RESET) && (portsc & PORT_PE))
530 return 0;
531 }
532 return -EBUSY;
533}
534
535static int ehci_wait_for_port(int port)
536{
537 u32 status;
538 int ret, reps;
539
540 for (reps = 0; reps < 3; reps++) {
541 dbgp_mdelay(100);
542 status = readl(&ehci_regs->status);
543 if (status & STS_PCD) {
544 ret = ehci_reset_port(port);
545 if (ret == 0)
546 return 0;
547 }
548 }
549 return -ENOTCONN;
550}
551
552#ifdef DBGP_DEBUG
553# define dbgp_printk early_printk
554#else
555static inline void dbgp_printk(const char *fmt, ...) { }
556#endif
557
558typedef void (*set_debug_port_t)(int port);
559
560static void default_set_debug_port(int port)
561{
562}
563
564static set_debug_port_t set_debug_port = default_set_debug_port;
565
566static void nvidia_set_debug_port(int port)
567{
568 u32 dword;
569 dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
570 0x74);
571 dword &= ~(0x0f<<12);
572 dword |= ((port & 0x0f)<<12);
573 write_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 0x74,
574 dword);
575 dbgp_printk("set debug port to %d\n", port);
576}
577
578static void __init detect_set_debug_port(void)
579{
580 u32 vendorid;
581
582 vendorid = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
583 0x00);
584
585 if ((vendorid & 0xffff) == 0x10de) {
586 dbgp_printk("using nvidia set_debug_port\n");
587 set_debug_port = nvidia_set_debug_port;
588 }
589}
590
591static int __init ehci_setup(void)
592{
593 struct usb_debug_descriptor dbgp_desc;
594 u32 cmd, ctrl, status, portsc, hcs_params;
595 u32 debug_port, new_debug_port = 0, n_ports;
596 u32 devnum;
597 int ret, i;
598 int loop;
599 int port_map_tried;
600 int playtimes = 3;
601
602try_next_time:
603 port_map_tried = 0;
604
605try_next_port:
606
607 hcs_params = readl(&ehci_caps->hcs_params);
608 debug_port = HCS_DEBUG_PORT(hcs_params);
609 n_ports = HCS_N_PORTS(hcs_params);
610
611 dbgp_printk("debug_port: %d\n", debug_port);
612 dbgp_printk("n_ports: %d\n", n_ports);
613
614 for (i = 1; i <= n_ports; i++) {
615 portsc = readl(&ehci_regs->port_status[i-1]);
616 dbgp_printk("portstatus%d: %08x\n", i, portsc);
617 }
618
619 if (port_map_tried && (new_debug_port != debug_port)) {
620 if (--playtimes) {
621 set_debug_port(new_debug_port);
622 goto try_next_time;
623 }
624 return -1;
625 }
626
627 loop = 10;
628 /* Reset the EHCI controller */
629 cmd = readl(&ehci_regs->command);
630 cmd |= CMD_RESET;
631 writel(cmd, &ehci_regs->command);
632 do {
633 cmd = readl(&ehci_regs->command);
634 } while ((cmd & CMD_RESET) && (--loop > 0));
635
636 if (!loop) {
637		dbgp_printk("cannot reset ehci\n");
638 return -1;
639 }
640 dbgp_printk("ehci reset done\n");
641
642 /* Claim ownership, but do not enable yet */
643 ctrl = readl(&ehci_debug->control);
644 ctrl |= DBGP_OWNER;
645 ctrl &= ~(DBGP_ENABLED | DBGP_INUSE);
646 writel(ctrl, &ehci_debug->control);
647
648 /* Start the ehci running */
649 cmd = readl(&ehci_regs->command);
650 cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET);
651 cmd |= CMD_RUN;
652 writel(cmd, &ehci_regs->command);
653
654 /* Ensure everything is routed to the EHCI */
655 writel(FLAG_CF, &ehci_regs->configured_flag);
656
657 /* Wait until the controller is no longer halted */
658 loop = 10;
659 do {
660 status = readl(&ehci_regs->status);
661 } while ((status & STS_HALT) && (--loop > 0));
662
663 if (!loop) {
664		dbgp_printk("ehci could not be started\n");
665 return -1;
666 }
667 dbgp_printk("ehci started\n");
668
669 /* Wait for a device to show up in the debug port */
670 ret = ehci_wait_for_port(debug_port);
671 if (ret < 0) {
672 dbgp_printk("No device found in debug port\n");
673 goto next_debug_port;
674 }
675 dbgp_printk("ehci wait for port done\n");
676
677 /* Enable the debug port */
678 ctrl = readl(&ehci_debug->control);
679 ctrl |= DBGP_CLAIM;
680 writel(ctrl, &ehci_debug->control);
681 ctrl = readl(&ehci_debug->control);
682 if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) {
683 dbgp_printk("No device in debug port\n");
684 writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control);
685 goto err;
686 }
687	dbgp_printk("debug port enabled\n");
688
689 /* Completely transfer the debug device to the debug controller */
690 portsc = readl(&ehci_regs->port_status[debug_port - 1]);
691 portsc &= ~PORT_PE;
692 writel(portsc, &ehci_regs->port_status[debug_port - 1]);
693
694 dbgp_mdelay(100);
695
696 /* Find the debug device and make it device number 127 */
697 for (devnum = 0; devnum <= 127; devnum++) {
698 ret = dbgp_control_msg(devnum,
699 USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
700 USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0,
701 &dbgp_desc, sizeof(dbgp_desc));
702 if (ret > 0)
703 break;
704 }
705 if (devnum > 127) {
706 dbgp_printk("Could not find attached debug device\n");
707 goto err;
708 }
709 if (ret < 0) {
710 dbgp_printk("Attached device is not a debug device\n");
711 goto err;
712 }
713 dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
714
715 /* Move the device to 127 if it isn't already there */
716 if (devnum != USB_DEBUG_DEVNUM) {
717 ret = dbgp_control_msg(devnum,
718 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
719 USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0);
720 if (ret < 0) {
721 dbgp_printk("Could not move attached device to %d\n",
722 USB_DEBUG_DEVNUM);
723 goto err;
724 }
725 devnum = USB_DEBUG_DEVNUM;
726 dbgp_printk("debug device renamed to 127\n");
727 }
728
729 /* Enable the debug interface */
730 ret = dbgp_control_msg(USB_DEBUG_DEVNUM,
731 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
732 USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0);
733 if (ret < 0) {
734 dbgp_printk(" Could not enable the debug device\n");
735 goto err;
736 }
737 dbgp_printk("debug interface enabled\n");
738
739 /* Perform a small write to get the even/odd data state in sync
740 */
741 ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1);
742 if (ret < 0) {
743 dbgp_printk("dbgp_bulk_write failed: %d\n", ret);
744 goto err;
745 }
746	dbgp_printk("small write done\n");
747
748 return 0;
749err:
750 /* Things didn't work so remove my claim */
751 ctrl = readl(&ehci_debug->control);
752 ctrl &= ~(DBGP_CLAIM | DBGP_OUT);
753 writel(ctrl, &ehci_debug->control);
754 return -1;
755
756next_debug_port:
757 port_map_tried |= (1<<(debug_port - 1));
758	new_debug_port = (debug_port % n_ports) + 1;
759 if (port_map_tried != ((1<<n_ports) - 1)) {
760 set_debug_port(new_debug_port);
761 goto try_next_port;
762 }
763 if (--playtimes) {
764 set_debug_port(new_debug_port);
765 goto try_next_time;
766 }
767
768 return -1;
769}
770
771static int __init early_dbgp_init(char *s)
772{
773 u32 debug_port, bar, offset;
774 u32 bus, slot, func, cap;
775 void __iomem *ehci_bar;
776 u32 dbgp_num;
777 u32 bar_val;
778 char *e;
779 int ret;
780 u8 byte;
781
782 if (!early_pci_allowed())
783 return -1;
784
785 dbgp_num = 0;
786 if (*s)
787 dbgp_num = simple_strtoul(s, &e, 10);
788 dbgp_printk("dbgp_num: %d\n", dbgp_num);
789
790 cap = find_dbgp(dbgp_num, &bus, &slot, &func);
791 if (!cap)
792 return -1;
793
794 dbgp_printk("Found EHCI debug port on %02x:%02x.%1x\n", bus, slot,
795 func);
796
797 debug_port = read_pci_config(bus, slot, func, cap);
798 bar = (debug_port >> 29) & 0x7;
799 bar = (bar * 4) + 0xc;
800 offset = (debug_port >> 16) & 0xfff;
801 dbgp_printk("bar: %02x offset: %03x\n", bar, offset);
802 if (bar != PCI_BASE_ADDRESS_0) {
803 dbgp_printk("only debug ports on bar 1 handled.\n");
804
805 return -1;
806 }
807
808 bar_val = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
809 dbgp_printk("bar_val: %02x offset: %03x\n", bar_val, offset);
810 if (bar_val & ~PCI_BASE_ADDRESS_MEM_MASK) {
811 dbgp_printk("only simple 32bit mmio bars supported\n");
812
813 return -1;
814 }
815
816 /* double check if the mem space is enabled */
817 byte = read_pci_config_byte(bus, slot, func, 0x04);
818 if (!(byte & 0x2)) {
819 byte |= 0x02;
820 write_pci_config_byte(bus, slot, func, 0x04, byte);
821 dbgp_printk("mmio for ehci enabled\n");
822 }
823
824 /*
825 * FIXME I don't have the bar size so just guess PAGE_SIZE is more
826 * than enough. 1K is the biggest I have seen.
827 */
828 set_fixmap_nocache(FIX_DBGP_BASE, bar_val & PAGE_MASK);
829 ehci_bar = (void __iomem *)__fix_to_virt(FIX_DBGP_BASE);
830 ehci_bar += bar_val & ~PAGE_MASK;
831 dbgp_printk("ehci_bar: %p\n", ehci_bar);
832
833 ehci_caps = ehci_bar;
834 ehci_regs = ehci_bar + HC_LENGTH(readl(&ehci_caps->hc_capbase));
835 ehci_debug = ehci_bar + offset;
836 ehci_dev.bus = bus;
837 ehci_dev.slot = slot;
838 ehci_dev.func = func;
839
840 detect_set_debug_port();
841
842 ret = ehci_setup();
843 if (ret < 0) {
844 dbgp_printk("ehci_setup failed\n");
845 ehci_debug = NULL;
846
847 return -1;
848 }
849
850 return 0;
851}
852
853static void early_dbgp_write(struct console *con, const char *str, u32 n)
854{
855 int chunk, ret;
856
857 if (!ehci_debug)
858 return;
859 while (n > 0) {
860 chunk = n;
861 if (chunk > DBGP_MAX_PACKET)
862 chunk = DBGP_MAX_PACKET;
863 ret = dbgp_bulk_write(USB_DEBUG_DEVNUM,
864 dbgp_endpoint_out, str, chunk);
865 str += chunk;
866 n -= chunk;
867 }
868}
869
870static struct console early_dbgp_console = {
871 .name = "earlydbg",
872 .write = early_dbgp_write,
873 .flags = CON_PRINTBUFFER,
874 .index = -1,
875};
876#endif
877
 /* Console interface to a host file on AMD's SimNow! */
 
 static int simnow_fd;
@@ -165,6 +889,7 @@ enum {
 static noinline long simnow(long cmd, long a, long b, long c)
 {
 	long ret;
+
 	asm volatile("cpuid" :
 		"=a" (ret) :
 		"b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
@@ -174,6 +899,7 @@ static noinline long simnow(long cmd, long a, long b, long c)
 static void __init simnow_init(char *str)
 {
 	char *fn = "klog";
+
 	if (*str == '=')
 		fn = ++str;
 	/* error ignored */
@@ -194,7 +920,7 @@ static struct console simnow_console = {
 
 /* Direct interface for emergencies */
 static struct console *early_console = &early_vga_console;
-static int early_console_initialized;
+static int __initdata early_console_initialized;
 
 asmlinkage void early_printk(const char *fmt, ...)
 {
@@ -208,10 +934,11 @@ asmlinkage void early_printk(const char *fmt, ...)
 	va_end(ap);
 }
 
-static int __initdata keep_early;
 
 static int __init setup_early_printk(char *buf)
 {
+	int keep_early;
+
 	if (!buf)
 		return 0;
 
@@ -219,8 +946,7 @@ static int __init setup_early_printk(char *buf)
 		return 0;
 	early_console_initialized = 1;
 
-	if (strstr(buf, "keep"))
-		keep_early = 1;
+	keep_early = (strstr(buf, "keep") != NULL);
 
 	if (!strncmp(buf, "serial", 6)) {
 		early_serial_init(buf + 6);
@@ -238,6 +964,17 @@ static int __init setup_early_printk(char *buf)
 		simnow_init(buf + 6);
 		early_console = &simnow_console;
 		keep_early = 1;
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+	} else if (!strncmp(buf, "dbgp", 4)) {
+		if (early_dbgp_init(buf+4) < 0)
+			return 0;
+		early_console = &early_dbgp_console;
+		/*
+		 * The USB subsystem will reset the EHCI controller, so
+		 * don't keep this early console around.
+		 */
+		keep_early = 0;
+#endif
 #ifdef CONFIG_HVC_XEN
 	} else if (!strncmp(buf, "xen", 3)) {
 		early_console = &xenboot_console;
@@ -251,4 +988,5 @@ static int __init setup_early_printk(char *buf)
 	register_console(early_console);
 	return 0;
 }
+
 early_param("earlyprintk", setup_early_printk);
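
Every back end in this file, including the new dbgp one, plugs into the same contract: a struct console providing a write() method, handed to register_console(). A minimal sketch of one more hypothetical back end under that contract (port 0xe9 is a debug port honoured by several emulators; all names here are invented):

/* Minimal early-console sketch: bytes go to I/O port 0xe9, which some
 * emulators log on the host side. Illustrative only. */
#include <linux/console.h>
#include <asm/io.h>

static void early_e9_write(struct console *con, const char *s, unsigned n)
{
	while (n--)
		outb(*s++, 0xe9);
}

static struct console early_e9_console = {
	.name	= "early_e9",
	.write	= early_e9_write,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
};

setup_early_printk() would select such a console from the earlyprintk= string exactly as it does for serial, vga, simnow and dbgp above.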
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 06cc8d4254b1..1119d247fe11 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -367,6 +367,10 @@ void __init efi_init(void)
 			efi.smbios = config_tables[i].table;
 			printk(" SMBIOS=0x%lx ", config_tables[i].table);
 		} else if (!efi_guidcmp(config_tables[i].guid,
+					UV_SYSTEM_TABLE_GUID)) {
+			efi.uv_systab = config_tables[i].table;
+			printk(" UVsystab=0x%lx ", config_tables[i].table);
+		} else if (!efi_guidcmp(config_tables[i].guid,
 					HCDP_TABLE_GUID)) {
 			efi.hcdp = config_tables[i].table;
 			printk(" HCDP=0x%lx ", config_tables[i].table);
@@ -414,9 +418,11 @@ void __init efi_init(void)
 	if (memmap.map == NULL)
 		printk(KERN_ERR "Could not map the EFI memory map!\n");
 	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
+
 	if (memmap.desc_size != sizeof(efi_memory_desc_t))
-		printk(KERN_WARNING "Kernel-defined memdesc"
-		       "doesn't match the one from EFI!\n");
+		printk(KERN_WARNING
+		  "Kernel-defined memdesc doesn't match the one from EFI!\n");
+
 	if (add_efi_memmap)
 		do_add_efi_memmap();
 
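
efi_guidcmp() follows memcmp() semantics, returning zero on equality, which is why every match in the scan above is written as !efi_guidcmp(...). A reduced sketch of the pattern the first hunk extends, with hypothetical 'tables'/'nr' stand-ins for the fields the real loop iterates:

/* Reduced sketch of the EFI config-table GUID scan. */
#include <linux/kernel.h>
#include <linux/efi.h>

static void __init scan_config_tables(efi_config_table_t *tables,
				      unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		if (!efi_guidcmp(tables[i].guid, UV_SYSTEM_TABLE_GUID)) {
			efi.uv_systab = tables[i].table;
			printk(" UVsystab=0x%lx ", tables[i].table);
		}
		/* ...further GUID branches, as in efi_init()... */
	}
}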
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 109792bc7cfa..c356423a6026 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -629,7 +629,7 @@ ENTRY(interrupt)
 ENTRY(irq_entries_start)
 	RING0_INT_FRAME
 vector=0
-.rept NR_IRQS
+.rept NR_VECTORS
 	ALIGN
 	.if vector
 	CFI_ADJUST_CFA_OFFSET -4
@@ -730,6 +730,7 @@ error_code:
 	movl $(__USER_DS), %ecx
 	movl %ecx, %ds
 	movl %ecx, %es
+	TRACE_IRQS_OFF
 	movl %esp,%eax			# pt_regs pointer
 	call *%edi
 	jmp ret_from_exception
@@ -760,20 +761,9 @@ ENTRY(device_not_available)
 	RING0_INT_FRAME
 	pushl $-1			# mark this as an int
 	CFI_ADJUST_CFA_OFFSET 4
-	SAVE_ALL
-	GET_CR0_INTO_EAX
-	testl $0x4, %eax		# EM (math emulation bit)
-	jne device_not_available_emulate
-	preempt_stop(CLBR_ANY)
-	call math_state_restore
-	jmp ret_from_exception
-device_not_available_emulate:
-	pushl $0			# temporary storage for ORIG_EIP
-	CFI_ADJUST_CFA_OFFSET 4
-	call math_emulate
-	addl $4, %esp
-	CFI_ADJUST_CFA_OFFSET -4
-	jmp ret_from_exception
+	pushl $do_device_not_available
+	CFI_ADJUST_CFA_OFFSET 4
+	jmp error_code
 	CFI_ENDPROC
 END(device_not_available)
 
@@ -814,6 +804,7 @@ debug_stack_correct:
 	pushl $-1			# mark this as an int
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
+	TRACE_IRQS_OFF
 	xorl %edx,%edx			# error code 0
 	movl %esp,%eax			# pt_regs pointer
 	call do_debug
@@ -858,6 +849,7 @@ nmi_stack_correct:
 	pushl %eax
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
+	TRACE_IRQS_OFF
 	xorl %edx,%edx			# zero error code
 	movl %esp,%eax			# pt_regs pointer
 	call do_nmi
@@ -898,6 +890,7 @@ nmi_espfix_stack:
 	pushl %eax
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
+	TRACE_IRQS_OFF
 	FIXUP_ESPFIX_STACK		# %eax == %esp
 	xorl %edx,%edx			# zero error code
 	call do_nmi
@@ -928,6 +921,7 @@ KPROBE_ENTRY(int3)
 	pushl $-1			# mark this as an int
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
+	TRACE_IRQS_OFF
 	xorl %edx,%edx			# zero error code
 	movl %esp,%eax			# pt_regs pointer
 	call do_int3
@@ -1030,7 +1024,7 @@ ENTRY(machine_check)
 	RING0_INT_FRAME
 	pushl $0
 	CFI_ADJUST_CFA_OFFSET 4
-	pushl machine_check_vector
+	pushl $do_machine_check
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
@@ -1159,20 +1153,6 @@ ENDPROC(xen_failsafe_callback)
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 ENTRY(mcount)
-	pushl %eax
-	pushl %ecx
-	pushl %edx
-	movl 0xc(%esp), %eax
-	subl $MCOUNT_INSN_SIZE, %eax
-
-.globl mcount_call
-mcount_call:
-	call ftrace_stub
-
-	popl %edx
-	popl %ecx
-	popl %eax
-
 	ret
 END(mcount)
 
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 89434d439605..09e7145484c5 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -64,32 +64,6 @@
 #ifdef CONFIG_FTRACE
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
-
-	subq $0x38, %rsp
-	movq %rax, (%rsp)
-	movq %rcx, 8(%rsp)
-	movq %rdx, 16(%rsp)
-	movq %rsi, 24(%rsp)
-	movq %rdi, 32(%rsp)
-	movq %r8, 40(%rsp)
-	movq %r9, 48(%rsp)
-
-	movq 0x38(%rsp), %rdi
-	subq $MCOUNT_INSN_SIZE, %rdi
-
-.globl mcount_call
-mcount_call:
-	call ftrace_stub
-
-	movq 48(%rsp), %r9
-	movq 40(%rsp), %r8
-	movq 32(%rsp), %rdi
-	movq 24(%rsp), %rsi
-	movq 16(%rsp), %rdx
-	movq 8(%rsp), %rcx
-	movq (%rsp), %rax
-	addq $0x38, %rsp
-
 	retq
 END(mcount)
 
@@ -275,9 +249,9 @@ ENTRY(native_usergs_sysret64)
 ENTRY(ret_from_fork)
 	CFI_DEFAULT_STACK
 	push kernel_eflags(%rip)
-	CFI_ADJUST_CFA_OFFSET 4
+	CFI_ADJUST_CFA_OFFSET 8
 	popf				# reset kernel eflags
-	CFI_ADJUST_CFA_OFFSET -4
+	CFI_ADJUST_CFA_OFFSET -8
 	call schedule_tail
 	GET_THREAD_INFO(%rcx)
 	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
@@ -667,6 +641,13 @@ END(stub_rt_sigreturn)
 	SAVE_ARGS
 	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
 	pushq %rbp
+	/*
+	 * Save rbp twice: one is for marking the stack frame, as usual, and the
+	 * other to fill pt_regs properly. This is because bx comes right
+	 * before the last saved register in that structure, and not bp. If the
+	 * base pointer were in the place bx is today, this would not be needed.
+	 */
+	movq %rbp, -8(%rsp)
 	CFI_ADJUST_CFA_OFFSET 8
 	CFI_REL_OFFSET rbp, 0
 	movq %rsp,%rbp
@@ -932,6 +913,9 @@ END(spurious_interrupt)
 	.if \ist
 	movq	%gs:pda_data_offset, %rbp
 	.endif
+	.if \irqtrace
+	TRACE_IRQS_OFF
+	.endif
 	movq %rsp,%rdi
 	movq ORIG_RAX(%rsp),%rsi
 	movq $-1,ORIG_RAX(%rsp)
@@ -1058,7 +1042,8 @@ KPROBE_ENTRY(error_entry)
 	je  error_kernelspace
 error_swapgs:
 	SWAPGS
 error_sti:
+	TRACE_IRQS_OFF
 	movq %rdi,RDI(%rsp)
 	CFI_REL_OFFSET rdi,RDI
 	movq %rsp,%rdi
@@ -1232,7 +1217,7 @@ ENTRY(simd_coprocessor_error)
 END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
-	zeroentry math_state_restore
+	zeroentry do_device_not_available
 END(device_not_available)
 
 	/* runs on exception stack */
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
new file mode 100644
index 000000000000..f454c78fcef6
--- /dev/null
+++ b/arch/x86/kernel/es7000_32.c
@@ -0,0 +1,363 @@
1/*
2 * Written by: Garry Forsgren, Unisys Corporation
3 * Natalie Protasevich, Unisys Corporation
4 * This file contains the code to configure and interface
5 * with Unisys ES7000 series hardware system manager.
6 *
7 * Copyright (c) 2003 Unisys Corporation. All Rights Reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it would be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
20 *
21 * Contact information: Unisys Corporation, Township Line & Union Meeting
22 * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or:
23 *
24 * http://www.unisys.com
25 */
26
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/kernel.h>
30#include <linux/smp.h>
31#include <linux/string.h>
32#include <linux/spinlock.h>
33#include <linux/errno.h>
34#include <linux/notifier.h>
35#include <linux/reboot.h>
36#include <linux/init.h>
37#include <linux/acpi.h>
38#include <asm/io.h>
39#include <asm/nmi.h>
40#include <asm/smp.h>
41#include <asm/apicdef.h>
42#include <mach_mpparse.h>
43
44/*
45 * ES7000 chipsets
46 */
47
48#define NON_UNISYS 0
49#define ES7000_CLASSIC 1
50#define ES7000_ZORRO 2
51
52
53#define MIP_REG 1
54#define MIP_PSAI_REG 4
55
56#define MIP_BUSY 1
57#define MIP_SPIN 0xf0000
58#define MIP_VALID 0x0100000000000000ULL
59#define MIP_PORT(VALUE) ((VALUE >> 32) & 0xffff)
60
61#define MIP_RD_LO(VALUE) (VALUE & 0xffffffff)
62
63struct mip_reg_info {
64 unsigned long long mip_info;
65 unsigned long long delivery_info;
66 unsigned long long host_reg;
67 unsigned long long mip_reg;
68};
69
70struct part_info {
71 unsigned char type;
72 unsigned char length;
73 unsigned char part_id;
74 unsigned char apic_mode;
75 unsigned long snum;
76 char ptype[16];
77 char sname[64];
78 char pname[64];
79};
80
81struct psai {
82 unsigned long long entry_type;
83 unsigned long long addr;
84 unsigned long long bep_addr;
85};
86
87struct es7000_mem_info {
88 unsigned char type;
89 unsigned char length;
90 unsigned char resv[6];
91 unsigned long long start;
92 unsigned long long size;
93};
94
95struct es7000_oem_table {
96 unsigned long long hdr;
97 struct mip_reg_info mip;
98 struct part_info pif;
99 struct es7000_mem_info shm;
100 struct psai psai;
101};
102
103#ifdef CONFIG_ACPI
104
105struct oem_table {
106 struct acpi_table_header Header;
107 u32 OEMTableAddr;
108 u32 OEMTableSize;
109};
110
111extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
112extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
113#endif
114
115struct mip_reg {
116 unsigned long long off_0;
117 unsigned long long off_8;
118 unsigned long long off_10;
119 unsigned long long off_18;
120 unsigned long long off_20;
121 unsigned long long off_28;
122 unsigned long long off_30;
123 unsigned long long off_38;
124};
125
126#define MIP_SW_APIC 0x1020b
127#define MIP_FUNC(VALUE) (VALUE & 0xff)
128
129/*
130 * ES7000 Globals
131 */
132
133static volatile unsigned long *psai = NULL;
134static struct mip_reg *mip_reg;
135static struct mip_reg *host_reg;
136static int mip_port;
137static unsigned long mip_addr, host_addr;
138
139int es7000_plat;
140
141/*
142 * GSI override for ES7000 platforms.
143 */
144
145static unsigned int base;
146
147static int
148es7000_rename_gsi(int ioapic, int gsi)
149{
150 if (es7000_plat == ES7000_ZORRO)
151 return gsi;
152
153 if (!base) {
154 int i;
155 for (i = 0; i < nr_ioapics; i++)
156 base += nr_ioapic_registers[i];
157 }
158
159 if (!ioapic && (gsi < 16))
160 gsi += base;
161 return gsi;
162}
163
164void __init
165setup_unisys(void)
166{
167 /*
168 * Determine the generation of the ES7000 currently running.
169 *
170 * es7000_plat = 1 if the machine is a 5xx ES7000 box
171 * es7000_plat = 2 if the machine is a x86_64 ES7000 box
172 *
173 */
174 if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2))
175 es7000_plat = ES7000_ZORRO;
176 else
177 es7000_plat = ES7000_CLASSIC;
178 ioapic_renumber_irq = es7000_rename_gsi;
179}
180
181/*
182 * Parse the OEM Table
183 */
184
185int __init
186parse_unisys_oem (char *oemptr)
187{
188 int i;
189 int success = 0;
190 unsigned char type, size;
191 unsigned long val;
192 char *tp = NULL;
193 struct psai *psaip = NULL;
194 struct mip_reg_info *mi;
195 struct mip_reg *host, *mip;
196
197 tp = oemptr;
198
199 tp += 8;
200
201 for (i=0; i <= 6; i++) {
202 type = *tp++;
203 size = *tp++;
204 tp -= 2;
205 switch (type) {
206 case MIP_REG:
207 mi = (struct mip_reg_info *)tp;
208 val = MIP_RD_LO(mi->host_reg);
209 host_addr = val;
210 host = (struct mip_reg *)val;
211 host_reg = __va(host);
212 val = MIP_RD_LO(mi->mip_reg);
213 mip_port = MIP_PORT(mi->mip_info);
214 mip_addr = val;
215 mip = (struct mip_reg *)val;
216 mip_reg = __va(mip);
217 pr_debug("es7000_mipcfg: host_reg = 0x%lx \n",
218 (unsigned long)host_reg);
219 pr_debug("es7000_mipcfg: mip_reg = 0x%lx \n",
220 (unsigned long)mip_reg);
221 success++;
222 break;
223 case MIP_PSAI_REG:
224 psaip = (struct psai *)tp;
225 if (tp != NULL) {
226 if (psaip->addr)
227 psai = __va(psaip->addr);
228 else
229 psai = NULL;
230 success++;
231 }
232 break;
233 default:
234 break;
235 }
236 tp += size;
237 }
238
239 if (success < 2) {
240 es7000_plat = NON_UNISYS;
241 } else
242 setup_unisys();
243 return es7000_plat;
244}
245
246#ifdef CONFIG_ACPI
247static unsigned long oem_addrX;
248static unsigned long oem_size;
249int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
250{
251 struct acpi_table_header *header = NULL;
252 int i = 0;
253 acpi_size tbl_size;
254
255 while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
256 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
257 struct oem_table *t = (struct oem_table *)header;
258
259 oem_addrX = t->OEMTableAddr;
260 oem_size = t->OEMTableSize;
261 early_acpi_os_unmap_memory(header, tbl_size);
262
263 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
264 oem_size);
265 return 0;
266 }
267 early_acpi_os_unmap_memory(header, tbl_size);
268 }
269 return -1;
270}
271
272void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
273{
274 if (!oem_addr)
275 return;
276
277 __acpi_unmap_table((char *)oem_addr, oem_size);
278}
279#endif
280
281static void
282es7000_spin(int n)
283{
284 int i = 0;
285
286 while (i++ < n)
287 rep_nop();
288}
289
290static int __init
291es7000_mip_write(struct mip_reg *mip_reg)
292{
293 int status = 0;
294 int spin;
295
296 spin = MIP_SPIN;
297 while (((unsigned long long)host_reg->off_38 &
298 (unsigned long long)MIP_VALID) != 0) {
299 if (--spin <= 0) {
300 printk("es7000_mip_write: Timeout waiting for Host Valid Flag");
301 return -1;
302 }
303 es7000_spin(MIP_SPIN);
304 }
305
306 memcpy(host_reg, mip_reg, sizeof(struct mip_reg));
307 outb(1, mip_port);
308
309 spin = MIP_SPIN;
310
311 while (((unsigned long long)mip_reg->off_38 &
312 (unsigned long long)MIP_VALID) == 0) {
313 if (--spin <= 0) {
314 printk("es7000_mip_write: Timeout waiting for MIP Valid Flag");
315 return -1;
316 }
317 es7000_spin(MIP_SPIN);
318 }
319
320 status = ((unsigned long long)mip_reg->off_0 &
321 (unsigned long long)0xffff0000000000ULL) >> 48;
322 mip_reg->off_38 = ((unsigned long long)mip_reg->off_38 &
323 (unsigned long long)~MIP_VALID);
324 return status;
325}
326
327int
328es7000_start_cpu(int cpu, unsigned long eip)
329{
330 unsigned long vect = 0, psaival = 0;
331
332 if (psai == NULL)
333 return -1;
334
335 vect = ((unsigned long)__pa(eip)/0x1000) << 16;
336 psaival = (0x1000000 | vect | cpu);
337
338 while (*psai & 0x1000000)
339 ;
340
341 *psai = psaival;
342
343 return 0;
344
345}
346
347void __init
348es7000_sw_apic(void)
349{
350 if (es7000_plat) {
351 int mip_status;
352 struct mip_reg es7000_mip_reg;
353
354 printk("ES7000: Enabling APIC mode.\n");
355 memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
356 es7000_mip_reg.off_0 = MIP_SW_APIC;
357 es7000_mip_reg.off_38 = (MIP_VALID);
358 while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
359 printk("es7000_sw_apic: command failed, status = %x\n",
360 mip_status);
361 return;
362 }
363}
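
The OEM table walked by parse_unisys_oem() above is a sequence of variable-length records, each starting with a one-byte type and a one-byte length that counts the whole record, header included (hence the tp -= 2 rewind before the cast). A minimal sketch of that walk, with a hypothetical helper name:

    static void walk_oem_records(char *tp, int nrecs)
    {
            while (nrecs--) {
                    unsigned char type = tp[0];  /* record type, e.g. MIP_REG */
                    unsigned char size = tp[1];  /* length of the whole record */

                    switch (type) {
                    /* MIP_REG, MIP_PSAI_REG, ... handled as above */
                    default:
                            break;
                    }
                    tp += size;                  /* step to the next header */
            }
    }
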
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index ab115cd15fdf..d073d981a730 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -11,17 +11,18 @@
11 11
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/hardirq.h> 13#include <linux/hardirq.h>
14#include <linux/uaccess.h>
14#include <linux/ftrace.h> 15#include <linux/ftrace.h>
15#include <linux/percpu.h> 16#include <linux/percpu.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/list.h> 18#include <linux/list.h>
18 19
19#include <asm/alternative.h>
20#include <asm/ftrace.h> 20#include <asm/ftrace.h>
21#include <asm/nops.h>
21 22
22 23
23/* Long is fine, even if it is only 4 bytes ;-) */ 24/* Long is fine, even if it is only 4 bytes ;-) */
24static long *ftrace_nop; 25static unsigned long *ftrace_nop;
25 26
26union ftrace_code_union { 27union ftrace_code_union {
27 char code[MCOUNT_INSN_SIZE]; 28 char code[MCOUNT_INSN_SIZE];
@@ -60,11 +61,7 @@ notrace int
60ftrace_modify_code(unsigned long ip, unsigned char *old_code, 61ftrace_modify_code(unsigned long ip, unsigned char *old_code,
61 unsigned char *new_code) 62 unsigned char *new_code)
62{ 63{
63 unsigned replaced; 64 unsigned char replaced[MCOUNT_INSN_SIZE];
64 unsigned old = *(unsigned *)old_code; /* 4 bytes */
65 unsigned new = *(unsigned *)new_code; /* 4 bytes */
66 unsigned char newch = new_code[4];
67 int faulted = 0;
68 65
69 /* 66 /*
70 * Note: Due to modules and __init, code can 67 * Note: Due to modules and __init, code can
@@ -72,29 +69,20 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
72 * as well as code changing. 69 * as well as code changing.
73 * 70 *
74 * No real locking needed, this code is run through 71 * No real locking needed, this code is run through
75 * kstop_machine. 72 * kstop_machine, or before SMP starts.
76 */ 73 */
77 asm volatile ( 74 if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE))
78 "1: lock\n" 75 return 1;
79 " cmpxchg %3, (%2)\n" 76
80 " jnz 2f\n" 77 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
81 " movb %b4, 4(%2)\n" 78 return 2;
82 "2:\n"
83 ".section .fixup, \"ax\"\n"
84 "3: movl $1, %0\n"
85 " jmp 2b\n"
86 ".previous\n"
87 _ASM_EXTABLE(1b, 3b)
88 : "=r"(faulted), "=a"(replaced)
89 : "r"(ip), "r"(new), "c"(newch),
90 "0"(faulted), "a"(old)
91 : "memory");
92 sync_core();
93 79
94 if (replaced != old && replaced != new) 80 WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code,
95 faulted = 2; 81 MCOUNT_INSN_SIZE));
96 82
97 return faulted; 83 sync_core();
84
85 return 0;
98} 86}
99 87
100notrace int ftrace_update_ftrace_func(ftrace_func_t func) 88notrace int ftrace_update_ftrace_func(ftrace_func_t func)
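
The rewrite of ftrace_modify_code() above drops the cmpxchg asm in favour of a plain read/compare/write done with the inatomic user-copy helpers, which fault gracefully when the target text has vanished (modules, __init). A minimal sketch of the pattern, with a hypothetical name and assuming len <= MCOUNT_INSN_SIZE; note that the safety argument is kstop_machine (or pre-SMP execution), not locking:

    static int patch_text_checked(unsigned long ip, const unsigned char *expect,
                                  const unsigned char *repl, size_t len)
    {
            unsigned char cur[MCOUNT_INSN_SIZE];

            if (__copy_from_user_inatomic(cur, (char __user *)ip, len))
                    return 1;        /* text unmapped: nothing to patch */
            if (memcmp(cur, expect, len))
                    return 2;        /* site is not what we expected */
            if (__copy_to_user_inatomic((char __user *)ip, repl, len))
                    return 3;        /* unexpected write fault */
            sync_core();             /* serialize before executing new code */
            return 0;
    }
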
@@ -112,30 +100,76 @@ notrace int ftrace_update_ftrace_func(ftrace_func_t func)
112 100
113notrace int ftrace_mcount_set(unsigned long *data) 101notrace int ftrace_mcount_set(unsigned long *data)
114{ 102{
115 unsigned long ip = (long)(&mcount_call); 103 /* mcount is initialized as a nop */
116 unsigned long *addr = data; 104 *data = 0;
117 unsigned char old[MCOUNT_INSN_SIZE], *new;
118
119 /*
120 * Replace the mcount stub with a pointer to the
121 * ip recorder function.
122 */
123 memcpy(old, &mcount_call, MCOUNT_INSN_SIZE);
124 new = ftrace_call_replace(ip, *addr);
125 *addr = ftrace_modify_code(ip, old, new);
126
127 return 0; 105 return 0;
128} 106}
129 107
130int __init ftrace_dyn_arch_init(void *data) 108int __init ftrace_dyn_arch_init(void *data)
131{ 109{
132 const unsigned char *const *noptable = find_nop_table(); 110 extern const unsigned char ftrace_test_p6nop[];
133 111 extern const unsigned char ftrace_test_nop5[];
134 /* This is running in kstop_machine */ 112 extern const unsigned char ftrace_test_jmp[];
135 113 int faulted = 0;
136 ftrace_mcount_set(data);
137 114
138 ftrace_nop = (unsigned long *)noptable[MCOUNT_INSN_SIZE]; 115 /*
116 * There is no good nop for all x86 archs.
117 * We will default to using the P6_NOP5, but first we
118 * will test to make sure that the nop will actually
119 * work on this CPU. If it faults, we will then
 120 * fall back to a less efficient 5 byte nop. If that fails
 121 * we then just use a jmp as our nop. This isn't the most
 122 * efficient nop, but we cannot use a multi-part nop
123 * since we would then risk being preempted in the middle
124 * of that nop, and if we enabled tracing then, it might
125 * cause a system crash.
126 *
127 * TODO: check the cpuid to determine the best nop.
128 */
129 asm volatile (
130 "jmp ftrace_test_jmp\n"
131 /* This code needs to stay around */
132 ".section .text, \"ax\"\n"
133 "ftrace_test_jmp:"
134 "jmp ftrace_test_p6nop\n"
135 "nop\n"
136 "nop\n"
137 "nop\n" /* 2 byte jmp + 3 bytes */
138 "ftrace_test_p6nop:"
139 P6_NOP5
140 "jmp 1f\n"
141 "ftrace_test_nop5:"
142 ".byte 0x66,0x66,0x66,0x66,0x90\n"
143 "jmp 1f\n"
144 ".previous\n"
145 "1:"
146 ".section .fixup, \"ax\"\n"
147 "2: movl $1, %0\n"
148 " jmp ftrace_test_nop5\n"
149 "3: movl $2, %0\n"
150 " jmp 1b\n"
151 ".previous\n"
152 _ASM_EXTABLE(ftrace_test_p6nop, 2b)
153 _ASM_EXTABLE(ftrace_test_nop5, 3b)
154 : "=r"(faulted) : "0" (faulted));
155
156 switch (faulted) {
157 case 0:
158 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
159 ftrace_nop = (unsigned long *)ftrace_test_p6nop;
160 break;
161 case 1:
162 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
163 ftrace_nop = (unsigned long *)ftrace_test_nop5;
164 break;
165 case 2:
166 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
167 ftrace_nop = (unsigned long *)ftrace_test_jmp;
168 break;
169 }
170
 171 /* The return code is returned via data */
172 *(unsigned long *)data = 0;
139 173
140 return 0; 174 return 0;
141} 175}
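
For reference, the three 5-byte nop candidates probed by ftrace_dyn_arch_init() above, as byte sequences (the first two appear verbatim in the pr_info strings; the jmp encoding is inferred from the "2 byte jmp + 3 bytes" comment):

    static const unsigned char p6_nop5[]  = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
    static const unsigned char alt_nop5[] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
    static const unsigned char jmp_nop5[] = { 0xeb, 0x03, 0x90, 0x90, 0x90 };

Each candidate is a single instruction (the jmp simply hops over three pad bytes), which is the point of the exercise: a multi-instruction nop could be preempted halfway through and then rewritten while a CPU still sits in the middle of it.
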
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index eaff0bbb1444..6c9bfc9e1e95 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -16,87 +16,63 @@
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/hardirq.h> 18#include <linux/hardirq.h>
19#include <linux/dmar.h>
19 20
20#include <asm/smp.h> 21#include <asm/smp.h>
21#include <asm/ipi.h> 22#include <asm/ipi.h>
22#include <asm/genapic.h> 23#include <asm/genapic.h>
23 24
24#ifdef CONFIG_ACPI 25extern struct genapic apic_flat;
25#include <acpi/acpi_bus.h> 26extern struct genapic apic_physflat;
26#endif 27extern struct genapic apic_x2apic_uv_x;
27 28extern struct genapic apic_x2apic_phys;
28DEFINE_PER_CPU(int, x2apic_extra_bits); 29extern struct genapic apic_x2apic_cluster;
29 30
30struct genapic __read_mostly *genapic = &apic_flat; 31struct genapic __read_mostly *genapic = &apic_flat;
31 32
32static enum uv_system_type uv_system_type; 33static struct genapic *apic_probe[] __initdata = {
34 &apic_x2apic_uv_x,
35 &apic_x2apic_phys,
36 &apic_x2apic_cluster,
37 &apic_physflat,
38 NULL,
39};
33 40
34/* 41/*
35 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. 42 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
36 */ 43 */
37void __init setup_apic_routing(void) 44void __init setup_apic_routing(void)
38{ 45{
39 if (uv_system_type == UV_NON_UNIQUE_APIC) 46 if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) {
40 genapic = &apic_x2apic_uv_x; 47 if (!intr_remapping_enabled)
41 else 48 genapic = &apic_flat;
42#ifdef CONFIG_ACPI 49 }
43 /*
44 * Quirk: some x86_64 machines can only use physical APIC mode
45 * regardless of how many processors are present (x86_64 ES7000
46 * is an example).
47 */
48 if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
49 (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL))
50 genapic = &apic_physflat;
51 else
52#endif
53
54 if (max_physical_apicid < 8)
55 genapic = &apic_flat;
56 else
57 genapic = &apic_physflat;
58 50
59 printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name); 51 if (genapic == &apic_flat) {
52 if (max_physical_apicid >= 8)
53 genapic = &apic_physflat;
54 printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
55 }
60} 56}
61 57
62/* Same for both flat and physical. */ 58/* Same for both flat and physical. */
63 59
64void send_IPI_self(int vector) 60void apic_send_IPI_self(int vector)
65{ 61{
66 __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); 62 __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
67} 63}
68 64
69int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) 65int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
70{ 66{
71 if (!strcmp(oem_id, "SGI")) { 67 int i;
72 if (!strcmp(oem_table_id, "UVL")) 68
73 uv_system_type = UV_LEGACY_APIC; 69 for (i = 0; apic_probe[i]; ++i) {
74 else if (!strcmp(oem_table_id, "UVX")) 70 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
75 uv_system_type = UV_X2APIC; 71 genapic = apic_probe[i];
76 else if (!strcmp(oem_table_id, "UVH")) 72 printk(KERN_INFO "Setting APIC routing to %s.\n",
77 uv_system_type = UV_NON_UNIQUE_APIC; 73 genapic->name);
74 return 1;
75 }
78 } 76 }
79 return 0; 77 return 0;
80} 78}
81
82unsigned int read_apic_id(void)
83{
84 unsigned int id;
85
86 WARN_ON(preemptible() && num_online_cpus() > 1);
87 id = apic_read(APIC_ID);
88 if (uv_system_type >= UV_X2APIC)
89 id |= __get_cpu_var(x2apic_extra_bits);
90 return id;
91}
92
93enum uv_system_type get_uv_system_type(void)
94{
95 return uv_system_type;
96}
97
98int is_uv_system(void)
99{
100 return uv_system_type != UV_NONE;
101}
102EXPORT_SYMBOL_GPL(is_uv_system);
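
The probe loop above turns APIC-mode selection into a small driver model: acpi_madt_oem_check() asks each apic_probe[] entry in order and the first driver whose ->acpi_madt_oem_check() returns nonzero wins, with apic_flat remaining the default when nothing claims the machine. A hypothetical driver would slot in like this (illustrative sketch only; "apic_example" does not exist):

    static int __init example_acpi_madt_oem_check(char *oem_id,
                                                  char *oem_table_id)
    {
            return !strcmp(oem_id, "EXMPL");  /* claim matching firmware */
    }

    struct genapic apic_example = {
            .name                = "example",
            .acpi_madt_oem_check = example_acpi_madt_oem_check,
            /* ...IPI, APIC-ID and delivery ops, as in apic_flat... */
    };
    /* plus an &apic_example entry before the NULL in apic_probe[] */
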
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 786548a62d38..2ec2de8d8c46 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -15,9 +15,20 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/hardirq.h>
18#include <asm/smp.h> 19#include <asm/smp.h>
19#include <asm/ipi.h> 20#include <asm/ipi.h>
20#include <asm/genapic.h> 21#include <asm/genapic.h>
22#include <mach_apicdef.h>
23
24#ifdef CONFIG_ACPI
25#include <acpi/acpi_bus.h>
26#endif
27
28static int __init flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
29{
30 return 1;
31}
21 32
22static cpumask_t flat_target_cpus(void) 33static cpumask_t flat_target_cpus(void)
23{ 34{
@@ -95,9 +106,33 @@ static void flat_send_IPI_all(int vector)
95 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); 106 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
96} 107}
97 108
109static unsigned int get_apic_id(unsigned long x)
110{
111 unsigned int id;
112
113 id = (((x)>>24) & 0xFFu);
114 return id;
115}
116
117static unsigned long set_apic_id(unsigned int id)
118{
119 unsigned long x;
120
121 x = ((id & 0xFFu)<<24);
122 return x;
123}
124
125static unsigned int read_xapic_id(void)
126{
127 unsigned int id;
128
129 id = get_apic_id(apic_read(APIC_ID));
130 return id;
131}
132
98static int flat_apic_id_registered(void) 133static int flat_apic_id_registered(void)
99{ 134{
100 return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); 135 return physid_isset(read_xapic_id(), phys_cpu_present_map);
101} 136}
102 137
103static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) 138static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
@@ -112,6 +147,7 @@ static unsigned int phys_pkg_id(int index_msb)
112 147
113struct genapic apic_flat = { 148struct genapic apic_flat = {
114 .name = "flat", 149 .name = "flat",
150 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
115 .int_delivery_mode = dest_LowestPrio, 151 .int_delivery_mode = dest_LowestPrio,
116 .int_dest_mode = (APIC_DEST_LOGICAL != 0), 152 .int_dest_mode = (APIC_DEST_LOGICAL != 0),
117 .target_cpus = flat_target_cpus, 153 .target_cpus = flat_target_cpus,
@@ -121,8 +157,12 @@ struct genapic apic_flat = {
121 .send_IPI_all = flat_send_IPI_all, 157 .send_IPI_all = flat_send_IPI_all,
122 .send_IPI_allbutself = flat_send_IPI_allbutself, 158 .send_IPI_allbutself = flat_send_IPI_allbutself,
123 .send_IPI_mask = flat_send_IPI_mask, 159 .send_IPI_mask = flat_send_IPI_mask,
160 .send_IPI_self = apic_send_IPI_self,
124 .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, 161 .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
125 .phys_pkg_id = phys_pkg_id, 162 .phys_pkg_id = phys_pkg_id,
163 .get_apic_id = get_apic_id,
164 .set_apic_id = set_apic_id,
165 .apic_id_mask = (0xFFu<<24),
126}; 166};
127 167
128/* 168/*
@@ -130,6 +170,23 @@ struct genapic apic_flat = {
130 * We cannot use logical delivery in this case because the mask 170 * We cannot use logical delivery in this case because the mask
131 * overflows, so use physical mode. 171 * overflows, so use physical mode.
132 */ 172 */
173static int __init physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
174{
175#ifdef CONFIG_ACPI
176 /*
177 * Quirk: some x86_64 machines can only use physical APIC mode
178 * regardless of how many processors are present (x86_64 ES7000
179 * is an example).
180 */
181 if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
182 (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
183 printk(KERN_DEBUG "system APIC only can use physical flat");
184 return 1;
185 }
186#endif
187
188 return 0;
189}
133 190
134static cpumask_t physflat_target_cpus(void) 191static cpumask_t physflat_target_cpus(void)
135{ 192{
@@ -176,6 +233,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
176 233
177struct genapic apic_physflat = { 234struct genapic apic_physflat = {
178 .name = "physical flat", 235 .name = "physical flat",
236 .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
179 .int_delivery_mode = dest_Fixed, 237 .int_delivery_mode = dest_Fixed,
180 .int_dest_mode = (APIC_DEST_PHYSICAL != 0), 238 .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
181 .target_cpus = physflat_target_cpus, 239 .target_cpus = physflat_target_cpus,
@@ -185,6 +243,10 @@ struct genapic apic_physflat = {
185 .send_IPI_all = physflat_send_IPI_all, 243 .send_IPI_all = physflat_send_IPI_all,
186 .send_IPI_allbutself = physflat_send_IPI_allbutself, 244 .send_IPI_allbutself = physflat_send_IPI_allbutself,
187 .send_IPI_mask = physflat_send_IPI_mask, 245 .send_IPI_mask = physflat_send_IPI_mask,
246 .send_IPI_self = apic_send_IPI_self,
188 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, 247 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
189 .phys_pkg_id = phys_pkg_id, 248 .phys_pkg_id = phys_pkg_id,
249 .get_apic_id = get_apic_id,
250 .set_apic_id = set_apic_id,
251 .apic_id_mask = (0xFFu<<24),
190}; 252};
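
The new get_apic_id()/set_apic_id() helpers just move the 8-bit xAPIC ID in and out of bits 31:24 of the APIC_ID register value, which is also why apic_id_mask is 0xFF << 24. Worked example: get_apic_id(0x0e000000) == 0x0e and set_apic_id(0x0e) == 0x0e000000. The x2apic drivers added below use the full 32-bit value instead (mask 0xFFFFFFFF, identity get/set).
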
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
new file mode 100644
index 000000000000..e4bf2cc0d743
--- /dev/null
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -0,0 +1,159 @@
1#include <linux/threads.h>
2#include <linux/cpumask.h>
3#include <linux/string.h>
4#include <linux/kernel.h>
5#include <linux/ctype.h>
6#include <linux/init.h>
7#include <linux/dmar.h>
8
9#include <asm/smp.h>
10#include <asm/ipi.h>
11#include <asm/genapic.h>
12
13DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
14
15static int __init x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
16{
17 if (cpu_has_x2apic)
18 return 1;
19
20 return 0;
21}
22
23/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
24
25static cpumask_t x2apic_target_cpus(void)
26{
27 return cpumask_of_cpu(0);
28}
29
30/*
31 * for now each logical cpu is in its own vector allocation domain.
32 */
33static cpumask_t x2apic_vector_allocation_domain(int cpu)
34{
35 cpumask_t domain = CPU_MASK_NONE;
36 cpu_set(cpu, domain);
37 return domain;
38}
39
40static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
41 unsigned int dest)
42{
43 unsigned long cfg;
44
45 cfg = __prepare_ICR(0, vector, dest);
46
47 /*
48 * send the IPI.
49 */
50 x2apic_icr_write(cfg, apicid);
51}
52
53/*
 54 * For now, we send the IPIs one by one in the cpumask.
 55 * TBD: based on the cpu mask, we could send the IPIs to the cluster group
 56 * at once. We have 16 CPUs in a cluster. This would minimize IPI register
57 * writes.
58 */
59static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
60{
61 unsigned long flags;
62 unsigned long query_cpu;
63
64 local_irq_save(flags);
65 for_each_cpu_mask(query_cpu, mask) {
66 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu),
67 vector, APIC_DEST_LOGICAL);
68 }
69 local_irq_restore(flags);
70}
71
72static void x2apic_send_IPI_allbutself(int vector)
73{
74 cpumask_t mask = cpu_online_map;
75
76 cpu_clear(smp_processor_id(), mask);
77
78 if (!cpus_empty(mask))
79 x2apic_send_IPI_mask(mask, vector);
80}
81
82static void x2apic_send_IPI_all(int vector)
83{
84 x2apic_send_IPI_mask(cpu_online_map, vector);
85}
86
87static int x2apic_apic_id_registered(void)
88{
89 return 1;
90}
91
92static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
93{
94 int cpu;
95
96 /*
97 * We're using fixed IRQ delivery, can only return one phys APIC ID.
98 * May as well be the first.
99 */
100 cpu = first_cpu(cpumask);
101 if ((unsigned)cpu < NR_CPUS)
102 return per_cpu(x86_cpu_to_logical_apicid, cpu);
103 else
104 return BAD_APICID;
105}
106
107static unsigned int get_apic_id(unsigned long x)
108{
109 unsigned int id;
110
111 id = x;
112 return id;
113}
114
115static unsigned long set_apic_id(unsigned int id)
116{
117 unsigned long x;
118
119 x = id;
120 return x;
121}
122
123static unsigned int phys_pkg_id(int index_msb)
124{
125 return current_cpu_data.initial_apicid >> index_msb;
126}
127
128static void x2apic_send_IPI_self(int vector)
129{
130 apic_write(APIC_SELF_IPI, vector);
131}
132
133static void init_x2apic_ldr(void)
134{
135 int cpu = smp_processor_id();
136
137 per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
138 return;
139}
140
141struct genapic apic_x2apic_cluster = {
142 .name = "cluster x2apic",
143 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
144 .int_delivery_mode = dest_LowestPrio,
145 .int_dest_mode = (APIC_DEST_LOGICAL != 0),
146 .target_cpus = x2apic_target_cpus,
147 .vector_allocation_domain = x2apic_vector_allocation_domain,
148 .apic_id_registered = x2apic_apic_id_registered,
149 .init_apic_ldr = init_x2apic_ldr,
150 .send_IPI_all = x2apic_send_IPI_all,
151 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
152 .send_IPI_mask = x2apic_send_IPI_mask,
153 .send_IPI_self = x2apic_send_IPI_self,
154 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
155 .phys_pkg_id = phys_pkg_id,
156 .get_apic_id = get_apic_id,
157 .set_apic_id = set_apic_id,
158 .apic_id_mask = (0xFFFFFFFFu),
159};
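
init_x2apic_ldr() caches the hardware-assigned logical ID at APIC setup time because the x2APIC LDR, unlike its xAPIC counterpart, is read-only. Per the x2APIC architecture the logical ID is derived from the physical APIC ID roughly as follows (editorial sketch, not code from this patch):

    static u32 x2apic_logical_id(u32 apicid)
    {
            /* cluster number in bits 31:16, one member bit in bits 15:0 */
            return ((apicid >> 4) << 16) | (1u << (apicid & 0xf));
    }

That derivation is also where the "16 CPUs in a cluster" figure in the comment above comes from.
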
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
new file mode 100644
index 000000000000..8f1343df2627
--- /dev/null
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -0,0 +1,154 @@
1#include <linux/threads.h>
2#include <linux/cpumask.h>
3#include <linux/string.h>
4#include <linux/kernel.h>
5#include <linux/ctype.h>
6#include <linux/init.h>
7#include <linux/dmar.h>
8
9#include <asm/smp.h>
10#include <asm/ipi.h>
11#include <asm/genapic.h>
12
13static int x2apic_phys;
14
15static int set_x2apic_phys_mode(char *arg)
16{
17 x2apic_phys = 1;
18 return 0;
19}
20early_param("x2apic_phys", set_x2apic_phys_mode);
21
22static int __init x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
23{
24 if (cpu_has_x2apic && x2apic_phys)
25 return 1;
26
27 return 0;
28}
29
30/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
31
32static cpumask_t x2apic_target_cpus(void)
33{
34 return cpumask_of_cpu(0);
35}
36
37static cpumask_t x2apic_vector_allocation_domain(int cpu)
38{
39 cpumask_t domain = CPU_MASK_NONE;
40 cpu_set(cpu, domain);
41 return domain;
42}
43
44static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
45 unsigned int dest)
46{
47 unsigned long cfg;
48
49 cfg = __prepare_ICR(0, vector, dest);
50
51 /*
52 * send the IPI.
53 */
54 x2apic_icr_write(cfg, apicid);
55}
56
57static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
58{
59 unsigned long flags;
60 unsigned long query_cpu;
61
62 local_irq_save(flags);
63 for_each_cpu_mask(query_cpu, mask) {
64 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
65 vector, APIC_DEST_PHYSICAL);
66 }
67 local_irq_restore(flags);
68}
69
70static void x2apic_send_IPI_allbutself(int vector)
71{
72 cpumask_t mask = cpu_online_map;
73
74 cpu_clear(smp_processor_id(), mask);
75
76 if (!cpus_empty(mask))
77 x2apic_send_IPI_mask(mask, vector);
78}
79
80static void x2apic_send_IPI_all(int vector)
81{
82 x2apic_send_IPI_mask(cpu_online_map, vector);
83}
84
85static int x2apic_apic_id_registered(void)
86{
87 return 1;
88}
89
90static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
91{
92 int cpu;
93
94 /*
95 * We're using fixed IRQ delivery, can only return one phys APIC ID.
96 * May as well be the first.
97 */
98 cpu = first_cpu(cpumask);
99 if ((unsigned)cpu < NR_CPUS)
100 return per_cpu(x86_cpu_to_apicid, cpu);
101 else
102 return BAD_APICID;
103}
104
105static unsigned int get_apic_id(unsigned long x)
106{
107 unsigned int id;
108
109 id = x;
110 return id;
111}
112
113static unsigned long set_apic_id(unsigned int id)
114{
115 unsigned long x;
116
117 x = id;
118 return x;
119}
120
121static unsigned int phys_pkg_id(int index_msb)
122{
123 return current_cpu_data.initial_apicid >> index_msb;
124}
125
126void x2apic_send_IPI_self(int vector)
127{
128 apic_write(APIC_SELF_IPI, vector);
129}
130
131void init_x2apic_ldr(void)
132{
133 return;
134}
135
136struct genapic apic_x2apic_phys = {
137 .name = "physical x2apic",
138 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
139 .int_delivery_mode = dest_Fixed,
140 .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
141 .target_cpus = x2apic_target_cpus,
142 .vector_allocation_domain = x2apic_vector_allocation_domain,
143 .apic_id_registered = x2apic_apic_id_registered,
144 .init_apic_ldr = init_x2apic_ldr,
145 .send_IPI_all = x2apic_send_IPI_all,
146 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
147 .send_IPI_mask = x2apic_send_IPI_mask,
148 .send_IPI_self = x2apic_send_IPI_self,
149 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
150 .phys_pkg_id = phys_pkg_id,
151 .get_apic_id = get_apic_id,
152 .set_apic_id = set_apic_id,
153 .apic_id_mask = (0xFFFFFFFFu),
154};
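
Net effect of this file: physical-mode x2APIC is strictly opt-in. It is chosen only when the CPU advertises x2APIC and the x2apic_phys parameter is given on the kernel command line, and even then setup_apic_routing() in genapic_64.c above falls back to apic_flat unless interrupt remapping is enabled.
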
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index bfa837cb16be..bfd532843df6 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -12,12 +12,12 @@
12#include <linux/threads.h> 12#include <linux/threads.h>
13#include <linux/cpumask.h> 13#include <linux/cpumask.h>
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/kernel.h>
16#include <linux/ctype.h> 15#include <linux/ctype.h>
17#include <linux/init.h> 16#include <linux/init.h>
18#include <linux/sched.h> 17#include <linux/sched.h>
19#include <linux/bootmem.h> 18#include <linux/bootmem.h>
20#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/hardirq.h>
21#include <asm/smp.h> 21#include <asm/smp.h>
22#include <asm/ipi.h> 22#include <asm/ipi.h>
23#include <asm/genapic.h> 23#include <asm/genapic.h>
@@ -26,6 +26,36 @@
26#include <asm/uv/uv_hub.h> 26#include <asm/uv/uv_hub.h>
27#include <asm/uv/bios.h> 27#include <asm/uv/bios.h>
28 28
29DEFINE_PER_CPU(int, x2apic_extra_bits);
30
31static enum uv_system_type uv_system_type;
32
33static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
34{
35 if (!strcmp(oem_id, "SGI")) {
36 if (!strcmp(oem_table_id, "UVL"))
37 uv_system_type = UV_LEGACY_APIC;
38 else if (!strcmp(oem_table_id, "UVX"))
39 uv_system_type = UV_X2APIC;
40 else if (!strcmp(oem_table_id, "UVH")) {
41 uv_system_type = UV_NON_UNIQUE_APIC;
42 return 1;
43 }
44 }
45 return 0;
46}
47
48enum uv_system_type get_uv_system_type(void)
49{
50 return uv_system_type;
51}
52
53int is_uv_system(void)
54{
55 return uv_system_type != UV_NONE;
56}
57EXPORT_SYMBOL_GPL(is_uv_system);
58
29DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); 59DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
30EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); 60EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
31 61
@@ -84,7 +114,7 @@ static void uv_send_IPI_one(int cpu, int vector)
84 unsigned long val, apicid, lapicid; 114 unsigned long val, apicid, lapicid;
85 int pnode; 115 int pnode;
86 116
87 apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */ 117 apicid = per_cpu(x86_cpu_to_apicid, cpu);
88 lapicid = apicid & 0x3f; /* ZZZ macro needed */ 118 lapicid = apicid & 0x3f; /* ZZZ macro needed */
89 pnode = uv_apicid_to_pnode(apicid); 119 pnode = uv_apicid_to_pnode(apicid);
90 val = 120 val =
@@ -123,6 +153,10 @@ static int uv_apic_id_registered(void)
123 return 1; 153 return 1;
124} 154}
125 155
156static void uv_init_apic_ldr(void)
157{
158}
159
126static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) 160static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
127{ 161{
128 int cpu; 162 int cpu;
@@ -138,31 +172,59 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
138 return BAD_APICID; 172 return BAD_APICID;
139} 173}
140 174
175static unsigned int get_apic_id(unsigned long x)
176{
177 unsigned int id;
178
179 WARN_ON(preemptible() && num_online_cpus() > 1);
180 id = x | __get_cpu_var(x2apic_extra_bits);
181
182 return id;
183}
184
185static unsigned long set_apic_id(unsigned int id)
186{
187 unsigned long x;
188
189 /* maskout x2apic_extra_bits ? */
190 x = id;
191 return x;
192}
193
194static unsigned int uv_read_apic_id(void)
195{
196
197 return get_apic_id(apic_read(APIC_ID));
198}
199
141static unsigned int phys_pkg_id(int index_msb) 200static unsigned int phys_pkg_id(int index_msb)
142{ 201{
143 return GET_APIC_ID(read_apic_id()) >> index_msb; 202 return uv_read_apic_id() >> index_msb;
144} 203}
145 204
146#ifdef ZZZ /* Needs x2apic patch */
147static void uv_send_IPI_self(int vector) 205static void uv_send_IPI_self(int vector)
148{ 206{
149 apic_write(APIC_SELF_IPI, vector); 207 apic_write(APIC_SELF_IPI, vector);
150} 208}
151#endif
152 209
153struct genapic apic_x2apic_uv_x = { 210struct genapic apic_x2apic_uv_x = {
154 .name = "UV large system", 211 .name = "UV large system",
212 .acpi_madt_oem_check = uv_acpi_madt_oem_check,
155 .int_delivery_mode = dest_Fixed, 213 .int_delivery_mode = dest_Fixed,
156 .int_dest_mode = (APIC_DEST_PHYSICAL != 0), 214 .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
157 .target_cpus = uv_target_cpus, 215 .target_cpus = uv_target_cpus,
158 .vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */ 216 .vector_allocation_domain = uv_vector_allocation_domain,
159 .apic_id_registered = uv_apic_id_registered, 217 .apic_id_registered = uv_apic_id_registered,
218 .init_apic_ldr = uv_init_apic_ldr,
160 .send_IPI_all = uv_send_IPI_all, 219 .send_IPI_all = uv_send_IPI_all,
161 .send_IPI_allbutself = uv_send_IPI_allbutself, 220 .send_IPI_allbutself = uv_send_IPI_allbutself,
162 .send_IPI_mask = uv_send_IPI_mask, 221 .send_IPI_mask = uv_send_IPI_mask,
163 /* ZZZ.send_IPI_self = uv_send_IPI_self, */ 222 .send_IPI_self = uv_send_IPI_self,
164 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, 223 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
165 .phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */ 224 .phys_pkg_id = phys_pkg_id,
225 .get_apic_id = get_apic_id,
226 .set_apic_id = set_apic_id,
227 .apic_id_mask = (0xFFFFFFFFu),
166}; 228};
167 229
168static __cpuinit void set_x2apic_extra_bits(int pnode) 230static __cpuinit void set_x2apic_extra_bits(int pnode)
@@ -222,12 +284,13 @@ static __init void map_low_mmrs(void)
222 284
223enum map_type {map_wb, map_uc}; 285enum map_type {map_wb, map_uc};
224 286
225static __init void map_high(char *id, unsigned long base, int shift, enum map_type map_type) 287static __init void map_high(char *id, unsigned long base, int shift,
288 int max_pnode, enum map_type map_type)
226{ 289{
227 unsigned long bytes, paddr; 290 unsigned long bytes, paddr;
228 291
229 paddr = base << shift; 292 paddr = base << shift;
230 bytes = (1UL << shift); 293 bytes = (1UL << shift) * (max_pnode + 1);
231 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, 294 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
232 paddr + bytes); 295 paddr + bytes);
233 if (map_type == map_uc) 296 if (map_type == map_uc)
@@ -243,7 +306,7 @@ static __init void map_gru_high(int max_pnode)
243 306
244 gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR); 307 gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
245 if (gru.s.enable) 308 if (gru.s.enable)
246 map_high("GRU", gru.s.base, shift, map_wb); 309 map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
247} 310}
248 311
249static __init void map_config_high(int max_pnode) 312static __init void map_config_high(int max_pnode)
@@ -253,7 +316,7 @@ static __init void map_config_high(int max_pnode)
253 316
254 cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR); 317 cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
255 if (cfg.s.enable) 318 if (cfg.s.enable)
256 map_high("CONFIG", cfg.s.base, shift, map_uc); 319 map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
257} 320}
258 321
259static __init void map_mmr_high(int max_pnode) 322static __init void map_mmr_high(int max_pnode)
@@ -263,7 +326,7 @@ static __init void map_mmr_high(int max_pnode)
263 326
264 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); 327 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
265 if (mmr.s.enable) 328 if (mmr.s.enable)
266 map_high("MMR", mmr.s.base, shift, map_uc); 329 map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
267} 330}
268 331
269static __init void map_mmioh_high(int max_pnode) 332static __init void map_mmioh_high(int max_pnode)
@@ -273,17 +336,17 @@ static __init void map_mmioh_high(int max_pnode)
273 336
274 mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); 337 mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
275 if (mmioh.s.enable) 338 if (mmioh.s.enable)
276 map_high("MMIOH", mmioh.s.base, shift, map_uc); 339 map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
277} 340}
278 341
279static __init void uv_rtc_init(void) 342static __init void uv_rtc_init(void)
280{ 343{
281 long status, ticks_per_sec, drift; 344 long status;
345 u64 ticks_per_sec;
282 346
283 status = 347 status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
284 x86_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, 348 &ticks_per_sec);
285 &drift); 349 if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
286 if (status != 0 || ticks_per_sec < 100000) {
287 printk(KERN_WARNING 350 printk(KERN_WARNING
288 "unable to determine platform RTC clock frequency, " 351 "unable to determine platform RTC clock frequency, "
289 "guessing.\n"); 352 "guessing.\n");
@@ -293,7 +356,22 @@ static __init void uv_rtc_init(void)
293 sn_rtc_cycles_per_second = ticks_per_sec; 356 sn_rtc_cycles_per_second = ticks_per_sec;
294} 357}
295 358
296static bool uv_system_inited; 359/*
360 * Called on each cpu to initialize the per_cpu UV data area.
361 * ZZZ hotplug not supported yet
362 */
363void __cpuinit uv_cpu_init(void)
364{
 365 /* CPU 0 initialization will be done via uv_system_init. */
366 if (!uv_blade_info)
367 return;
368
369 uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
370
371 if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
372 set_x2apic_extra_bits(uv_hub_info->pnode);
373}
374
297 375
298void __init uv_system_init(void) 376void __init uv_system_init(void)
299{ 377{
@@ -349,6 +427,9 @@ void __init uv_system_init(void)
349 gnode_upper = (((unsigned long)node_id.s.node_id) & 427 gnode_upper = (((unsigned long)node_id.s.node_id) &
350 ~((1 << n_val) - 1)) << m_val; 428 ~((1 << n_val) - 1)) << m_val;
351 429
430 uv_bios_init();
431 uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
432 &uv_coherency_id, &uv_region_size);
352 uv_rtc_init(); 433 uv_rtc_init();
353 434
354 for_each_present_cpu(cpu) { 435 for_each_present_cpu(cpu) {
@@ -370,7 +451,7 @@ void __init uv_system_init(void)
370 uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; 451 uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
371 uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; 452 uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
372 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; 453 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
373 uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */ 454 uv_cpu_hub_info(cpu)->coherency_domain_number = uv_coherency_id;
374 uv_node_to_blade[nid] = blade; 455 uv_node_to_blade[nid] = blade;
375 uv_cpu_to_blade[cpu] = blade; 456 uv_cpu_to_blade[cpu] = blade;
376 max_pnode = max(pnode, max_pnode); 457 max_pnode = max(pnode, max_pnode);
@@ -385,19 +466,6 @@ void __init uv_system_init(void)
385 map_mmr_high(max_pnode); 466 map_mmr_high(max_pnode);
386 map_config_high(max_pnode); 467 map_config_high(max_pnode);
387 map_mmioh_high(max_pnode); 468 map_mmioh_high(max_pnode);
388 uv_system_inited = true;
389}
390
391/*
392 * Called on each cpu to initialize the per_cpu UV data area.
393 * ZZZ hotplug not supported yet
394 */
395void __cpuinit uv_cpu_init(void)
396{
397 BUG_ON(!uv_system_inited);
398 469
399 uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; 470 uv_cpu_init();
400
401 if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
402 set_x2apic_extra_bits(uv_hub_info->pnode);
403} 471}
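
The map_high() signature change is a sizing fix for the UV GRU/CONFIG/MMR/MMIOH windows: the old code mapped a single pnode's worth of space, while the new code scales the window by the number of pnodes present. Worked example with hypothetical values:

    /* shift = 28, max_pnode = 3:
     *   before: bytes = 1UL << 28             = 256 MiB  (pnode 0 only)
     *   after:  bytes = (1UL << 28) * (3 + 1) =   1 GiB  (pnodes 0..3)
     */
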
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
index 3e66bd364a9d..1dcb0f13897e 100644
--- a/arch/x86/kernel/head.c
+++ b/arch/x86/kernel/head.c
@@ -35,6 +35,7 @@ void __init reserve_ebda_region(void)
35 35
36 /* start of EBDA area */ 36 /* start of EBDA area */
37 ebda_addr = get_bios_ebda(); 37 ebda_addr = get_bios_ebda();
38 printk(KERN_INFO "BIOS EBDA/lowmem at: %08x/%08x\n", ebda_addr, lowmem);
38 39
39 /* Fixup: bios puts an EBDA in the top 64K segment */ 40 /* Fixup: bios puts an EBDA in the top 64K segment */
40 /* of conventional memory, but does not adjust lowmem. */ 41 /* of conventional memory, but does not adjust lowmem. */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 9bfc4d72fb2e..d16084f90649 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -108,12 +108,11 @@ void __init x86_64_start_kernel(char * real_mode_data)
108 } 108 }
109 load_idt((const struct desc_ptr *)&idt_descr); 109 load_idt((const struct desc_ptr *)&idt_descr);
110 110
111 early_printk("Kernel alive\n"); 111 if (console_loglevel == 10)
112 early_printk("Kernel alive\n");
112 113
113 x86_64_init_pda(); 114 x86_64_init_pda();
114 115
115 early_printk("Kernel really alive\n");
116
117 x86_64_start_reservations(real_mode_data); 116 x86_64_start_reservations(real_mode_data);
118} 117}
119 118
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index a7010c3a377a..e835b4eea70b 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -172,10 +172,6 @@ num_subarch_entries = (. - subarch_entries) / 4
172 * 172 *
173 * Note that the stack is not yet set up! 173 * Note that the stack is not yet set up!
174 */ 174 */
175#define PTE_ATTR 0x007 /* PRESENT+RW+USER */
176#define PDE_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
177#define PGD_ATTR 0x001 /* PRESENT (no other attributes) */
178
179default_entry: 175default_entry:
180#ifdef CONFIG_X86_PAE 176#ifdef CONFIG_X86_PAE
181 177
@@ -196,9 +192,9 @@ default_entry:
196 movl $pa(pg0), %edi 192 movl $pa(pg0), %edi
197 movl %edi, pa(init_pg_tables_start) 193 movl %edi, pa(init_pg_tables_start)
198 movl $pa(swapper_pg_pmd), %edx 194 movl $pa(swapper_pg_pmd), %edx
199 movl $PTE_ATTR, %eax 195 movl $PTE_IDENT_ATTR, %eax
20010: 19610:
201 leal PDE_ATTR(%edi),%ecx /* Create PMD entry */ 197 leal PDE_IDENT_ATTR(%edi),%ecx /* Create PMD entry */
202 movl %ecx,(%edx) /* Store PMD entry */ 198 movl %ecx,(%edx) /* Store PMD entry */
203 /* Upper half already zero */ 199 /* Upper half already zero */
204 addl $8,%edx 200 addl $8,%edx
@@ -215,7 +211,7 @@ default_entry:
215 * End condition: we must map up to and including INIT_MAP_BEYOND_END 211 * End condition: we must map up to and including INIT_MAP_BEYOND_END
216 * bytes beyond the end of our own page tables. 212 * bytes beyond the end of our own page tables.
217 */ 213 */
218 leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp 214 leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
219 cmpl %ebp,%eax 215 cmpl %ebp,%eax
220 jb 10b 216 jb 10b
2211: 2171:
@@ -224,7 +220,7 @@ default_entry:
224 movl %eax, pa(max_pfn_mapped) 220 movl %eax, pa(max_pfn_mapped)
225 221
226 /* Do early initialization of the fixmap area */ 222 /* Do early initialization of the fixmap area */
227 movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax 223 movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
228 movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8) 224 movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
229#else /* Not PAE */ 225#else /* Not PAE */
230 226
@@ -233,9 +229,9 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
233 movl $pa(pg0), %edi 229 movl $pa(pg0), %edi
234 movl %edi, pa(init_pg_tables_start) 230 movl %edi, pa(init_pg_tables_start)
235 movl $pa(swapper_pg_dir), %edx 231 movl $pa(swapper_pg_dir), %edx
236 movl $PTE_ATTR, %eax 232 movl $PTE_IDENT_ATTR, %eax
23710: 23310:
238 leal PDE_ATTR(%edi),%ecx /* Create PDE entry */ 234 leal PDE_IDENT_ATTR(%edi),%ecx /* Create PDE entry */
239 movl %ecx,(%edx) /* Store identity PDE entry */ 235 movl %ecx,(%edx) /* Store identity PDE entry */
240 movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */ 236 movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */
241 addl $4,%edx 237 addl $4,%edx
@@ -249,7 +245,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
249 * bytes beyond the end of our own page tables; the +0x007 is 245 * bytes beyond the end of our own page tables; the +0x007 is
250 * the attribute bits 246 * the attribute bits
251 */ 247 */
252 leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp 248 leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
253 cmpl %ebp,%eax 249 cmpl %ebp,%eax
254 jb 10b 250 jb 10b
255 movl %edi,pa(init_pg_tables_end) 251 movl %edi,pa(init_pg_tables_end)
@@ -257,7 +253,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
257 movl %eax, pa(max_pfn_mapped) 253 movl %eax, pa(max_pfn_mapped)
258 254
259 /* Do early initialization of the fixmap area */ 255 /* Do early initialization of the fixmap area */
260 movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax 256 movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
261 movl %eax,pa(swapper_pg_dir+0xffc) 257 movl %eax,pa(swapper_pg_dir+0xffc)
262#endif 258#endif
263 jmp 3f 259 jmp 3f
@@ -634,19 +630,19 @@ ENTRY(empty_zero_page)
634 /* Page-aligned for the benefit of paravirt? */ 630 /* Page-aligned for the benefit of paravirt? */
635 .align PAGE_SIZE_asm 631 .align PAGE_SIZE_asm
636ENTRY(swapper_pg_dir) 632ENTRY(swapper_pg_dir)
637 .long pa(swapper_pg_pmd+PGD_ATTR),0 /* low identity map */ 633 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
638# if KPMDS == 3 634# if KPMDS == 3
639 .long pa(swapper_pg_pmd+PGD_ATTR),0 635 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
640 .long pa(swapper_pg_pmd+PGD_ATTR+0x1000),0 636 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
641 .long pa(swapper_pg_pmd+PGD_ATTR+0x2000),0 637 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x2000),0
642# elif KPMDS == 2 638# elif KPMDS == 2
643 .long 0,0 639 .long 0,0
644 .long pa(swapper_pg_pmd+PGD_ATTR),0 640 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
645 .long pa(swapper_pg_pmd+PGD_ATTR+0x1000),0 641 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
646# elif KPMDS == 1 642# elif KPMDS == 1
647 .long 0,0 643 .long 0,0
648 .long 0,0 644 .long 0,0
649 .long pa(swapper_pg_pmd+PGD_ATTR),0 645 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
650# else 646# else
651# error "Kernel PMDs should be 1, 2 or 3" 647# error "Kernel PMDs should be 1, 2 or 3"
652# endif 648# endif
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index db3280afe886..26cfdc1d7c7f 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -110,7 +110,7 @@ startup_64:
110 movq %rdi, %rax 110 movq %rdi, %rax
111 shrq $PMD_SHIFT, %rax 111 shrq $PMD_SHIFT, %rax
112 andq $(PTRS_PER_PMD - 1), %rax 112 andq $(PTRS_PER_PMD - 1), %rax
113 leaq __PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx 113 leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
114 leaq level2_spare_pgt(%rip), %rbx 114 leaq level2_spare_pgt(%rip), %rbx
115 movq %rdx, 0(%rbx, %rax, 8) 115 movq %rdx, 0(%rbx, %rax, 8)
116ident_complete: 116ident_complete:
@@ -374,7 +374,7 @@ NEXT_PAGE(level2_ident_pgt)
374 /* Since I easily can, map the first 1G. 374 /* Since I easily can, map the first 1G.
375 * Don't set NX because code runs from these pages. 375 * Don't set NX because code runs from these pages.
376 */ 376 */
377 PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD) 377 PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
378 378
379NEXT_PAGE(level2_kernel_pgt) 379NEXT_PAGE(level2_kernel_pgt)
380 /* 380 /*
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 73deaffadd03..77017e834cf7 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1,29 +1,49 @@
1#include <linux/clocksource.h> 1#include <linux/clocksource.h>
2#include <linux/clockchips.h> 2#include <linux/clockchips.h>
3#include <linux/interrupt.h>
4#include <linux/sysdev.h>
3#include <linux/delay.h> 5#include <linux/delay.h>
4#include <linux/errno.h> 6#include <linux/errno.h>
5#include <linux/hpet.h> 7#include <linux/hpet.h>
6#include <linux/init.h> 8#include <linux/init.h>
7#include <linux/sysdev.h> 9#include <linux/cpu.h>
8#include <linux/pm.h> 10#include <linux/pm.h>
11#include <linux/io.h>
9 12
10#include <asm/fixmap.h> 13#include <asm/fixmap.h>
11#include <asm/hpet.h>
12#include <asm/i8253.h> 14#include <asm/i8253.h>
13#include <asm/io.h> 15#include <asm/hpet.h>
14 16
15#define HPET_MASK CLOCKSOURCE_MASK(32) 17#define HPET_MASK CLOCKSOURCE_MASK(32)
16#define HPET_SHIFT 22 18#define HPET_SHIFT 22
17 19
18/* FSEC = 10^-15 20/* FSEC = 10^-15
19 NSEC = 10^-9 */ 21 NSEC = 10^-9 */
20#define FSEC_PER_NSEC 1000000L 22#define FSEC_PER_NSEC 1000000L
23
24#define HPET_DEV_USED_BIT 2
25#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT)
26#define HPET_DEV_VALID 0x8
27#define HPET_DEV_FSB_CAP 0x1000
28#define HPET_DEV_PERI_CAP 0x2000
29
30#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
21 31
22/* 32/*
23 * HPET address is set in acpi/boot.c, when an ACPI entry exists 33 * HPET address is set in acpi/boot.c, when an ACPI entry exists
24 */ 34 */
25unsigned long hpet_address; 35unsigned long hpet_address;
26static void __iomem *hpet_virt_address; 36unsigned long hpet_num_timers;
37static void __iomem *hpet_virt_address;
38
39struct hpet_dev {
40 struct clock_event_device evt;
41 unsigned int num;
42 int cpu;
43 unsigned int irq;
44 unsigned int flags;
45 char name[10];
46};
27 47
28unsigned long hpet_readl(unsigned long a) 48unsigned long hpet_readl(unsigned long a)
29{ 49{
@@ -59,7 +79,7 @@ static inline void hpet_clear_mapping(void)
59static int boot_hpet_disable; 79static int boot_hpet_disable;
60int hpet_force_user; 80int hpet_force_user;
61 81
62static int __init hpet_setup(char* str) 82static int __init hpet_setup(char *str)
63{ 83{
64 if (str) { 84 if (str) {
65 if (!strncmp("disable", str, 7)) 85 if (!strncmp("disable", str, 7))
@@ -80,7 +100,7 @@ __setup("nohpet", disable_hpet);
80 100
81static inline int is_hpet_capable(void) 101static inline int is_hpet_capable(void)
82{ 102{
83 return (!boot_hpet_disable && hpet_address); 103 return !boot_hpet_disable && hpet_address;
84} 104}
85 105
86/* 106/*
@@ -102,6 +122,9 @@ EXPORT_SYMBOL_GPL(is_hpet_enabled);
102 * timer 0 and timer 1 in case of RTC emulation. 122 * timer 0 and timer 1 in case of RTC emulation.
103 */ 123 */
104#ifdef CONFIG_HPET 124#ifdef CONFIG_HPET
125
126static void hpet_reserve_msi_timers(struct hpet_data *hd);
127
105static void hpet_reserve_platform_timers(unsigned long id) 128static void hpet_reserve_platform_timers(unsigned long id)
106{ 129{
107 struct hpet __iomem *hpet = hpet_virt_address; 130 struct hpet __iomem *hpet = hpet_virt_address;
@@ -111,25 +134,31 @@ static void hpet_reserve_platform_timers(unsigned long id)
111 134
112 nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1; 135 nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
113 136
114 memset(&hd, 0, sizeof (hd)); 137 memset(&hd, 0, sizeof(hd));
115 hd.hd_phys_address = hpet_address; 138 hd.hd_phys_address = hpet_address;
116 hd.hd_address = hpet; 139 hd.hd_address = hpet;
117 hd.hd_nirqs = nrtimers; 140 hd.hd_nirqs = nrtimers;
118 hd.hd_flags = HPET_DATA_PLATFORM;
119 hpet_reserve_timer(&hd, 0); 141 hpet_reserve_timer(&hd, 0);
120 142
121#ifdef CONFIG_HPET_EMULATE_RTC 143#ifdef CONFIG_HPET_EMULATE_RTC
122 hpet_reserve_timer(&hd, 1); 144 hpet_reserve_timer(&hd, 1);
123#endif 145#endif
124 146
147 /*
148 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
149 * is wrong for i8259!) not the output IRQ. Many BIOS writers
150 * don't bother configuring *any* comparator interrupts.
151 */
125 hd.hd_irq[0] = HPET_LEGACY_8254; 152 hd.hd_irq[0] = HPET_LEGACY_8254;
126 hd.hd_irq[1] = HPET_LEGACY_RTC; 153 hd.hd_irq[1] = HPET_LEGACY_RTC;
127 154
128 for (i = 2; i < nrtimers; timer++, i++) { 155 for (i = 2; i < nrtimers; timer++, i++) {
129 hd.hd_irq[i] = (readl(&timer->hpet_config) & Tn_INT_ROUTE_CNF_MASK) >> 156 hd.hd_irq[i] = (readl(&timer->hpet_config) &
130 Tn_INT_ROUTE_CNF_SHIFT; 157 Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
131 } 158 }
132 159
160 hpet_reserve_msi_timers(&hd);
161
133 hpet_alloc(&hd); 162 hpet_alloc(&hd);
134 163
135} 164}
@@ -223,60 +252,70 @@ static void hpet_legacy_clockevent_register(void)
223 printk(KERN_DEBUG "hpet clockevent registered\n"); 252 printk(KERN_DEBUG "hpet clockevent registered\n");
224} 253}
225 254
226static void hpet_legacy_set_mode(enum clock_event_mode mode, 255static int hpet_setup_msi_irq(unsigned int irq);
227 struct clock_event_device *evt) 256
257static void hpet_set_mode(enum clock_event_mode mode,
258 struct clock_event_device *evt, int timer)
228{ 259{
229 unsigned long cfg, cmp, now; 260 unsigned long cfg, cmp, now;
230 uint64_t delta; 261 uint64_t delta;
231 262
232 switch(mode) { 263 switch (mode) {
233 case CLOCK_EVT_MODE_PERIODIC: 264 case CLOCK_EVT_MODE_PERIODIC:
234 delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * hpet_clockevent.mult; 265 delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
235 delta >>= hpet_clockevent.shift; 266 delta >>= evt->shift;
236 now = hpet_readl(HPET_COUNTER); 267 now = hpet_readl(HPET_COUNTER);
237 cmp = now + (unsigned long) delta; 268 cmp = now + (unsigned long) delta;
238 cfg = hpet_readl(HPET_T0_CFG); 269 cfg = hpet_readl(HPET_Tn_CFG(timer));
239 cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | 270 cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
240 HPET_TN_SETVAL | HPET_TN_32BIT; 271 HPET_TN_SETVAL | HPET_TN_32BIT;
241 hpet_writel(cfg, HPET_T0_CFG); 272 hpet_writel(cfg, HPET_Tn_CFG(timer));
242 /* 273 /*
243 * The first write after writing TN_SETVAL to the 274 * The first write after writing TN_SETVAL to the
244 * config register sets the counter value, the second 275 * config register sets the counter value, the second
245 * write sets the period. 276 * write sets the period.
246 */ 277 */
247 hpet_writel(cmp, HPET_T0_CMP); 278 hpet_writel(cmp, HPET_Tn_CMP(timer));
248 udelay(1); 279 udelay(1);
249 hpet_writel((unsigned long) delta, HPET_T0_CMP); 280 hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
250 break; 281 break;
251 282
252 case CLOCK_EVT_MODE_ONESHOT: 283 case CLOCK_EVT_MODE_ONESHOT:
253 cfg = hpet_readl(HPET_T0_CFG); 284 cfg = hpet_readl(HPET_Tn_CFG(timer));
254 cfg &= ~HPET_TN_PERIODIC; 285 cfg &= ~HPET_TN_PERIODIC;
255 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; 286 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
256 hpet_writel(cfg, HPET_T0_CFG); 287 hpet_writel(cfg, HPET_Tn_CFG(timer));
257 break; 288 break;
258 289
259 case CLOCK_EVT_MODE_UNUSED: 290 case CLOCK_EVT_MODE_UNUSED:
260 case CLOCK_EVT_MODE_SHUTDOWN: 291 case CLOCK_EVT_MODE_SHUTDOWN:
261 cfg = hpet_readl(HPET_T0_CFG); 292 cfg = hpet_readl(HPET_Tn_CFG(timer));
262 cfg &= ~HPET_TN_ENABLE; 293 cfg &= ~HPET_TN_ENABLE;
263 hpet_writel(cfg, HPET_T0_CFG); 294 hpet_writel(cfg, HPET_Tn_CFG(timer));
264 break; 295 break;
265 296
266 case CLOCK_EVT_MODE_RESUME: 297 case CLOCK_EVT_MODE_RESUME:
267 hpet_enable_legacy_int(); 298 if (timer == 0) {
299 hpet_enable_legacy_int();
300 } else {
301 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
302 hpet_setup_msi_irq(hdev->irq);
303 disable_irq(hdev->irq);
304 irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu));
305 enable_irq(hdev->irq);
306 }
268 break; 307 break;
269 } 308 }
270} 309}
271 310
272static int hpet_legacy_next_event(unsigned long delta, 311static int hpet_next_event(unsigned long delta,
273 struct clock_event_device *evt) 312 struct clock_event_device *evt, int timer)
274{ 313{
275 u32 cnt; 314 u32 cnt;
276 315
277 cnt = hpet_readl(HPET_COUNTER); 316 cnt = hpet_readl(HPET_COUNTER);
278 cnt += (u32) delta; 317 cnt += (u32) delta;
279 hpet_writel(cnt, HPET_T0_CMP); 318 hpet_writel(cnt, HPET_Tn_CMP(timer));
280 319
281 /* 320 /*
282 * We need to read back the CMP register to make sure that 321 * We need to read back the CMP register to make sure that
@@ -288,6 +327,347 @@ static int hpet_legacy_next_event(unsigned long delta,
288 return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; 327 return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
289} 328}
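
The signed-difference test above is the standard wraparound-safe way to ask "has the free-running 32-bit counter already reached the comparator?". A minimal user-space sketch of the same check (plain C, no HPET access, values illustrative):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 if the 32-bit counter has already reached or passed the
 * comparator value. The unsigned subtraction is reinterpreted as
 * signed, so differences within +/-2^31 ticks keep their ordering
 * even when the counter wraps around zero. */
static int counter_passed(uint32_t counter, uint32_t cmp)
{
	return (int32_t)(counter - cmp) >= 0;
}

int main(void)
{
	/* comparator just below the wrap, counter just after it */
	printf("%d\n", counter_passed(0x00000002, 0xfffffffe)); /* 1: passed */
	printf("%d\n", counter_passed(0xfffffff0, 0xfffffffe)); /* 0: not yet */
	return 0;
}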
290 329
330static void hpet_legacy_set_mode(enum clock_event_mode mode,
331 struct clock_event_device *evt)
332{
333 hpet_set_mode(mode, evt, 0);
334}
335
336static int hpet_legacy_next_event(unsigned long delta,
337 struct clock_event_device *evt)
338{
339 return hpet_next_event(delta, evt, 0);
340}
341
342/*
343 * HPET MSI Support
344 */
345#ifdef CONFIG_PCI_MSI
346
347static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
348static struct hpet_dev *hpet_devs;
349
350void hpet_msi_unmask(unsigned int irq)
351{
352 struct hpet_dev *hdev = get_irq_data(irq);
353 unsigned long cfg;
354
355 /* unmask it */
356 cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
357 cfg |= HPET_TN_FSB;
358 hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
359}
360
361void hpet_msi_mask(unsigned int irq)
362{
363 unsigned long cfg;
364 struct hpet_dev *hdev = get_irq_data(irq);
365
366 /* mask it */
367 cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
368 cfg &= ~HPET_TN_FSB;
369 hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
370}
371
372void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
373{
374 struct hpet_dev *hdev = get_irq_data(irq);
375
376 hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
377 hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
378}
379
380void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
381{
382 struct hpet_dev *hdev = get_irq_data(irq);
383
384 msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
385 msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
386 msg->address_hi = 0;
387}
388
389static void hpet_msi_set_mode(enum clock_event_mode mode,
390 struct clock_event_device *evt)
391{
392 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
393 hpet_set_mode(mode, evt, hdev->num);
394}
395
396static int hpet_msi_next_event(unsigned long delta,
397 struct clock_event_device *evt)
398{
399 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
400 return hpet_next_event(delta, evt, hdev->num);
401}
402
403static int hpet_setup_msi_irq(unsigned int irq)
404{
405 if (arch_setup_hpet_msi(irq)) {
406 destroy_irq(irq);
407 return -EINVAL;
408 }
409 return 0;
410}
411
412static int hpet_assign_irq(struct hpet_dev *dev)
413{
414 unsigned int irq;
415
416 irq = create_irq();
417 if (!irq)
418 return -EINVAL;
419
420 set_irq_data(irq, dev);
421
422 if (hpet_setup_msi_irq(irq))
423 return -EINVAL;
424
425 dev->irq = irq;
426 return 0;
427}
428
429static irqreturn_t hpet_interrupt_handler(int irq, void *data)
430{
431 struct hpet_dev *dev = (struct hpet_dev *)data;
432 struct clock_event_device *hevt = &dev->evt;
433
434 if (!hevt->event_handler) {
435 printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
436 dev->num);
437 return IRQ_HANDLED;
438 }
439
440 hevt->event_handler(hevt);
441 return IRQ_HANDLED;
442}
443
444static int hpet_setup_irq(struct hpet_dev *dev)
445{
446
447 if (request_irq(dev->irq, hpet_interrupt_handler,
448 IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev))
449 return -1;
450
451 disable_irq(dev->irq);
452 irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu));
453 enable_irq(dev->irq);
454
455 printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
456 dev->name, dev->irq);
457
458 return 0;
459}
460
461/* This should be called on the specified @cpu */
462static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
463{
464 struct clock_event_device *evt = &hdev->evt;
465 uint64_t hpet_freq;
466
467 WARN_ON(cpu != smp_processor_id());
468 if (!(hdev->flags & HPET_DEV_VALID))
469 return;
470
471 if (hpet_setup_msi_irq(hdev->irq))
472 return;
473
474 hdev->cpu = cpu;
475 per_cpu(cpu_hpet_dev, cpu) = hdev;
476 evt->name = hdev->name;
477 hpet_setup_irq(hdev);
478 evt->irq = hdev->irq;
479
480 evt->rating = 110;
481 evt->features = CLOCK_EVT_FEAT_ONESHOT;
482 if (hdev->flags & HPET_DEV_PERI_CAP)
483 evt->features |= CLOCK_EVT_FEAT_PERIODIC;
484
485 evt->set_mode = hpet_msi_set_mode;
486 evt->set_next_event = hpet_msi_next_event;
487 evt->shift = 32;
488
489 /*
490 * The period is a femtoseconds value. We need to calculate the
491 * scaled math multiplication factor for nanosecond to HPET tick
492 * conversion.
493 */
494 hpet_freq = 1000000000000000ULL;
495 do_div(hpet_freq, hpet_period);
496 evt->mult = div_sc((unsigned long) hpet_freq,
497 NSEC_PER_SEC, evt->shift);
498 /* Calculate the max delta */
499 evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
500 /* 5 usec minimum reprogramming delta. */
501 evt->min_delta_ns = 5000;
502
503 evt->cpumask = cpumask_of_cpu(hdev->cpu);
504 clockevents_register_device(evt);
505}
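
The mult/shift setup above is ordinary fixed-point scaling: nanoseconds are converted to HPET ticks as ticks = (ns * mult) >> shift. A standalone sketch of the arithmetic, assuming a typical 69841279 fs period (~14.318 MHz) — the period value is illustrative, not taken from this diff:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t hpet_period = 69841279ULL;	/* fs per tick, ~14.318 MHz */
	uint64_t hpet_freq = 1000000000000000ULL / hpet_period;
	unsigned shift = 32;

	/* same scaled math as div_sc(): mult = (freq << shift) / ns-per-sec */
	uint64_t mult = (hpet_freq << shift) / NSEC_PER_SEC;

	uint64_t ns = 1000000;			/* program a 1 ms event */
	uint64_t ticks = (ns * mult) >> shift;

	printf("freq=%llu Hz mult=%llu ticks(1ms)=%llu\n",
	       (unsigned long long)hpet_freq,
	       (unsigned long long)mult,
	       (unsigned long long)ticks);	/* ~14318 ticks */
	return 0;
}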
506
507#ifdef CONFIG_HPET
508/* Reserve at least one timer for userspace (/dev/hpet) */
509#define RESERVE_TIMERS 1
510#else
511#define RESERVE_TIMERS 0
512#endif
513
514static void hpet_msi_capability_lookup(unsigned int start_timer)
515{
516 unsigned int id;
517 unsigned int num_timers;
518 unsigned int num_timers_used = 0;
519 int i;
520
521 id = hpet_readl(HPET_ID);
522
523 num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
524 num_timers++; /* Value read out starts from 0 */
525
526 hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
527 if (!hpet_devs)
528 return;
529
530 hpet_num_timers = num_timers;
531
532 for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
533 struct hpet_dev *hdev = &hpet_devs[num_timers_used];
534 unsigned long cfg = hpet_readl(HPET_Tn_CFG(i));
535
536 /* Only consider HPET timers with MSI (FSB) support */
537 if (!(cfg & HPET_TN_FSB_CAP))
538 continue;
539
540 hdev->flags = 0;
541 if (cfg & HPET_TN_PERIODIC_CAP)
542 hdev->flags |= HPET_DEV_PERI_CAP;
543 hdev->num = i;
544
545 sprintf(hdev->name, "hpet%d", i);
546 if (hpet_assign_irq(hdev))
547 continue;
548
549 hdev->flags |= HPET_DEV_FSB_CAP;
550 hdev->flags |= HPET_DEV_VALID;
551 num_timers_used++;
552 if (num_timers_used == num_possible_cpus())
553 break;
554 }
555
556 printk(KERN_INFO "HPET: %d timers in total, %d timers will be used as per-cpu timers\n"
557 num_timers, num_timers_used);
558}
559
560#ifdef CONFIG_HPET
561static void hpet_reserve_msi_timers(struct hpet_data *hd)
562{
563 int i;
564
565 if (!hpet_devs)
566 return;
567
568 for (i = 0; i < hpet_num_timers; i++) {
569 struct hpet_dev *hdev = &hpet_devs[i];
570
571 if (!(hdev->flags & HPET_DEV_VALID))
572 continue;
573
574 hd->hd_irq[hdev->num] = hdev->irq;
575 hpet_reserve_timer(hd, hdev->num);
576 }
577}
578#endif
579
580static struct hpet_dev *hpet_get_unused_timer(void)
581{
582 int i;
583
584 if (!hpet_devs)
585 return NULL;
586
587 for (i = 0; i < hpet_num_timers; i++) {
588 struct hpet_dev *hdev = &hpet_devs[i];
589
590 if (!(hdev->flags & HPET_DEV_VALID))
591 continue;
592 if (test_and_set_bit(HPET_DEV_USED_BIT,
593 (unsigned long *)&hdev->flags))
594 continue;
595 return hdev;
596 }
597 return NULL;
598}
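
The allocation above is lock-free: test_and_set_bit() atomically claims the USED flag, so only one racing caller can win a given timer. A user-space sketch of the same pattern using C11 atomics (names and flag values are illustrative stand-ins for the HPET_DEV_* bits):

#include <stdatomic.h>
#include <stdio.h>

#define DEV_VALID 0x1	/* illustrative stand-ins for HPET_DEV_* */
#define DEV_USED  0x2

struct fake_timer {
	atomic_uint flags;
};

/* Claim the first valid, unused timer. atomic_fetch_or plays the
 * role of test_and_set_bit(): exactly one racing caller sees the
 * USED bit clear in the returned old value, so no lock is needed. */
static struct fake_timer *claim_timer(struct fake_timer *t, int n)
{
	for (int i = 0; i < n; i++) {
		if (!(atomic_load(&t[i].flags) & DEV_VALID))
			continue;
		if (atomic_fetch_or(&t[i].flags, DEV_USED) & DEV_USED)
			continue;	/* somebody else won the race */
		return &t[i];
	}
	return NULL;
}

int main(void)
{
	struct fake_timer timers[3] = { { 0 }, { DEV_VALID }, { DEV_VALID } };

	printf("claimed timer %td\n", claim_timer(timers, 3) - timers); /* 1 */
	printf("claimed timer %td\n", claim_timer(timers, 3) - timers); /* 2 */
	return 0;
}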
599
600struct hpet_work_struct {
601 struct delayed_work work;
602 struct completion complete;
603};
604
605static void hpet_work(struct work_struct *w)
606{
607 struct hpet_dev *hdev;
608 int cpu = smp_processor_id();
609 struct hpet_work_struct *hpet_work;
610
611 hpet_work = container_of(w, struct hpet_work_struct, work.work);
612
613 hdev = hpet_get_unused_timer();
614 if (hdev)
615 init_one_hpet_msi_clockevent(hdev, cpu);
616
617 complete(&hpet_work->complete);
618}
619
620static int hpet_cpuhp_notify(struct notifier_block *n,
621 unsigned long action, void *hcpu)
622{
623 unsigned long cpu = (unsigned long)hcpu;
624 struct hpet_work_struct work;
625 struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);
626
627 switch (action & 0xf) {
628 case CPU_ONLINE:
629 INIT_DELAYED_WORK(&work.work, hpet_work);
630 init_completion(&work.complete);
631 /* FIXME: add schedule_work_on() */
632 schedule_delayed_work_on(cpu, &work.work, 0);
633 wait_for_completion(&work.complete);
634 break;
635 case CPU_DEAD:
636 if (hdev) {
637 free_irq(hdev->irq, hdev);
638 hdev->flags &= ~HPET_DEV_USED;
639 per_cpu(cpu_hpet_dev, cpu) = NULL;
640 }
641 break;
642 }
643 return NOTIFY_OK;
644}
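
The CPU_ONLINE path above uses a common trick: queue work on the target CPU's workqueue and block on a completion, so init_one_hpet_msi_clockevent() runs on that CPU (hence its WARN_ON(cpu != smp_processor_id())). A user-space analogue with pthreads, where a worker thread stands in for the per-cpu workqueue (all names illustrative):

#include <pthread.h>
#include <stdio.h>

struct hpet_work {
	pthread_mutex_t lock;
	pthread_cond_t	done_cv;
	int		done;		/* the "completion" */
};

static void *worker(void *arg)
{
	struct hpet_work *w = arg;

	/* ...the clockevent init would run here, on the target CPU... */
	pthread_mutex_lock(&w->lock);
	w->done = 1;
	pthread_cond_signal(&w->done_cv);	/* complete() */
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

int main(void)
{
	struct hpet_work w = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};
	pthread_t t;

	pthread_create(&t, NULL, worker, &w);	/* schedule work "on cpu" */
	pthread_mutex_lock(&w.lock);
	while (!w.done)				/* wait_for_completion() */
		pthread_cond_wait(&w.done_cv, &w.lock);
	pthread_mutex_unlock(&w.lock);
	pthread_join(&t, NULL);
	puts("clockevent registered on target cpu");
	return 0;
}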
645#else
646
647static int hpet_setup_msi_irq(unsigned int irq)
648{
649 return 0;
650}
651static void hpet_msi_capability_lookup(unsigned int start_timer)
652{
653 return;
654}
655
656#ifdef CONFIG_HPET
657static void hpet_reserve_msi_timers(struct hpet_data *hd)
658{
659 return;
660}
661#endif
662
663static int hpet_cpuhp_notify(struct notifier_block *n,
664 unsigned long action, void *hcpu)
665{
666 return NOTIFY_OK;
667}
668
669#endif
670
291/* 671/*
292 * Clock source related code 672 * Clock source related code
293 */ 673 */
@@ -423,8 +803,10 @@ int __init hpet_enable(void)
423 803
424 if (id & HPET_ID_LEGSUP) { 804 if (id & HPET_ID_LEGSUP) {
425 hpet_legacy_clockevent_register(); 805 hpet_legacy_clockevent_register();
806 hpet_msi_capability_lookup(2);
426 return 1; 807 return 1;
427 } 808 }
809 hpet_msi_capability_lookup(0);
428 return 0; 810 return 0;
429 811
430out_nohpet: 812out_nohpet:
@@ -441,6 +823,8 @@ out_nohpet:
441 */ 823 */
442static __init int hpet_late_init(void) 824static __init int hpet_late_init(void)
443{ 825{
826 int cpu;
827
444 if (boot_hpet_disable) 828 if (boot_hpet_disable)
445 return -ENODEV; 829 return -ENODEV;
446 830
@@ -456,6 +840,13 @@ static __init int hpet_late_init(void)
456 840
457 hpet_reserve_platform_timers(hpet_readl(HPET_ID)); 841 hpet_reserve_platform_timers(hpet_readl(HPET_ID));
458 842
843 for_each_online_cpu(cpu) {
844 hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
845 }
846
847 /* This notifier should be registered after the workqueue is ready */
848 hotcpu_notifier(hpet_cpuhp_notify, -20);
849
459 return 0; 850 return 0;
460} 851}
461fs_initcall(hpet_late_init); 852fs_initcall(hpet_late_init);
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index eb9ddd8efb82..1f20608d4ca8 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -21,9 +21,12 @@
21# include <asm/sigcontext32.h> 21# include <asm/sigcontext32.h>
22# include <asm/user32.h> 22# include <asm/user32.h>
23#else 23#else
24# define save_i387_ia32 save_i387 24# define save_i387_xstate_ia32 save_i387_xstate
25# define restore_i387_ia32 restore_i387 25# define restore_i387_xstate_ia32 restore_i387_xstate
26# define _fpstate_ia32 _fpstate 26# define _fpstate_ia32 _fpstate
27# define _xstate_ia32 _xstate
28# define sig_xstate_ia32_size sig_xstate_size
29# define fx_sw_reserved_ia32 fx_sw_reserved
27# define user_i387_ia32_struct user_i387_struct 30# define user_i387_ia32_struct user_i387_struct
28# define user32_fxsr_struct user_fxsr_struct 31# define user32_fxsr_struct user_fxsr_struct
29#endif 32#endif
@@ -36,6 +39,7 @@
36 39
37static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; 40static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
38unsigned int xstate_size; 41unsigned int xstate_size;
42unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
39static struct i387_fxsave_struct fx_scratch __cpuinitdata; 43static struct i387_fxsave_struct fx_scratch __cpuinitdata;
40 44
41void __cpuinit mxcsr_feature_mask_init(void) 45void __cpuinit mxcsr_feature_mask_init(void)
@@ -61,6 +65,11 @@ void __init init_thread_xstate(void)
61 return; 65 return;
62 } 66 }
63 67
68 if (cpu_has_xsave) {
69 xsave_cntxt_init();
70 return;
71 }
72
64 if (cpu_has_fxsr) 73 if (cpu_has_fxsr)
65 xstate_size = sizeof(struct i387_fxsave_struct); 74 xstate_size = sizeof(struct i387_fxsave_struct);
66#ifdef CONFIG_X86_32 75#ifdef CONFIG_X86_32
@@ -83,9 +92,19 @@ void __cpuinit fpu_init(void)
83 92
84 write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */ 93 write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */
85 94
95 /*
96 * Boot processor to setup the FP and extended state context info.
97 */
98 if (!smp_processor_id())
99 init_thread_xstate();
100 xsave_init();
101
86 mxcsr_feature_mask_init(); 102 mxcsr_feature_mask_init();
87 /* clean state in init */ 103 /* clean state in init */
88 current_thread_info()->status = 0; 104 if (cpu_has_xsave)
105 current_thread_info()->status = TS_XSAVE;
106 else
107 current_thread_info()->status = 0;
89 clear_used_math(); 108 clear_used_math();
90} 109}
91#endif /* CONFIG_X86_64 */ 110#endif /* CONFIG_X86_64 */
@@ -195,6 +214,13 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
195 */ 214 */
196 target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; 215 target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
197 216
217 /*
218 * update the header bits in the xsave header, indicating the
219 * presence of FP and SSE state.
220 */
221 if (cpu_has_xsave)
222 target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
223
198 return ret; 224 return ret;
199} 225}
200 226
@@ -395,6 +421,12 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
395 if (!ret) 421 if (!ret)
396 convert_to_fxsr(target, &env); 422 convert_to_fxsr(target, &env);
397 423
424 /*
425 * update the header bit in the xsave header, indicating the
426 * presence of FP.
427 */
428 if (cpu_has_xsave)
429 target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
398 return ret; 430 return ret;
399} 431}
400 432
@@ -407,7 +439,6 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
407 struct task_struct *tsk = current; 439 struct task_struct *tsk = current;
408 struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave; 440 struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
409 441
410 unlazy_fpu(tsk);
411 fp->status = fp->swd; 442 fp->status = fp->swd;
412 if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct))) 443 if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
413 return -1; 444 return -1;
@@ -421,8 +452,6 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
421 struct user_i387_ia32_struct env; 452 struct user_i387_ia32_struct env;
422 int err = 0; 453 int err = 0;
423 454
424 unlazy_fpu(tsk);
425
426 convert_from_fxsr(&env, tsk); 455 convert_from_fxsr(&env, tsk);
427 if (__copy_to_user(buf, &env, sizeof(env))) 456 if (__copy_to_user(buf, &env, sizeof(env)))
428 return -1; 457 return -1;
@@ -432,16 +461,54 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
432 if (err) 461 if (err)
433 return -1; 462 return -1;
434 463
435 if (__copy_to_user(&buf->_fxsr_env[0], fx, 464 if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size))
436 sizeof(struct i387_fxsave_struct))) 465 return -1;
466 return 1;
467}
468
469static int save_i387_xsave(void __user *buf)
470{
471 struct task_struct *tsk = current;
472 struct _fpstate_ia32 __user *fx = buf;
473 int err = 0;
474
475 /*
476 * For legacy compatible, we always set FP/SSE bits in the bit
477 * vector while saving the state to the user context.
478 * This will enable us capturing any changes(during sigreturn) to
479 * the FP/SSE bits by the legacy applications which don't touch
480 * xstate_bv in the xsave header.
481 *
482 * xsave aware applications can change the xstate_bv in the xsave
483 * header as well as change any contents in the memory layout.
484 * xrestore as part of sigreturn will capture all the changes.
485 */
486 tsk->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
487
488 if (save_i387_fxsave(fx) < 0)
489 return -1;
490
491 err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32,
492 sizeof(struct _fpx_sw_bytes));
493 err |= __put_user(FP_XSTATE_MAGIC2,
494 (__u32 __user *) (buf + sig_xstate_ia32_size
495 - FP_XSTATE_MAGIC2_SIZE));
496 if (err)
437 return -1; 497 return -1;
498
438 return 1; 499 return 1;
439} 500}
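
The FP_XSTATE_MAGIC2 write above drops a sentinel into the last four bytes of the extended signal frame; on sigreturn its presence (together with the sw_reserved bookkeeping) tells the kernel the buffer really carries xsave state. A small sketch of that placement — the frame size and sentinel value here are illustrative, not the real kernel constants:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FRAME_SIZE 1024u	/* stands in for sig_xstate_ia32_size */
#define MAGIC2     0x46505845u	/* illustrative end-of-frame sentinel */

int main(void)
{
	unsigned char frame[FRAME_SIZE];
	uint32_t magic2 = MAGIC2;

	memset(frame, 0, sizeof(frame));
	/* the sentinel occupies the final 4 bytes of the frame */
	memcpy(frame + FRAME_SIZE - sizeof(magic2), &magic2, sizeof(magic2));

	printf("magic2 stored at offset %zu of %u\n",
	       FRAME_SIZE - sizeof(magic2), FRAME_SIZE);
	return 0;
}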
440 501
441int save_i387_ia32(struct _fpstate_ia32 __user *buf) 502int save_i387_xstate_ia32(void __user *buf)
442{ 503{
504 struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
505 struct task_struct *tsk = current;
506
443 if (!used_math()) 507 if (!used_math())
444 return 0; 508 return 0;
509
510 if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size))
511 return -EACCES;
445 /* 512 /*
446 * This will cause a "finit" to be triggered by the next 513 * This will cause a "finit" to be triggered by the next
447 * attempted FPU operation by the 'current' process. 514 * attempted FPU operation by the 'current' process.
@@ -451,13 +518,17 @@ int save_i387_ia32(struct _fpstate_ia32 __user *buf)
451 if (!HAVE_HWFP) { 518 if (!HAVE_HWFP) {
452 return fpregs_soft_get(current, NULL, 519 return fpregs_soft_get(current, NULL,
453 0, sizeof(struct user_i387_ia32_struct), 520 0, sizeof(struct user_i387_ia32_struct),
454 NULL, buf) ? -1 : 1; 521 NULL, fp) ? -1 : 1;
455 } 522 }
456 523
524 unlazy_fpu(tsk);
525
526 if (cpu_has_xsave)
527 return save_i387_xsave(fp);
457 if (cpu_has_fxsr) 528 if (cpu_has_fxsr)
458 return save_i387_fxsave(buf); 529 return save_i387_fxsave(fp);
459 else 530 else
460 return save_i387_fsave(buf); 531 return save_i387_fsave(fp);
461} 532}
462 533
463static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) 534static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
@@ -468,14 +539,15 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
468 sizeof(struct i387_fsave_struct)); 539 sizeof(struct i387_fsave_struct));
469} 540}
470 541
471static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) 542static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
543 unsigned int size)
472{ 544{
473 struct task_struct *tsk = current; 545 struct task_struct *tsk = current;
474 struct user_i387_ia32_struct env; 546 struct user_i387_ia32_struct env;
475 int err; 547 int err;
476 548
477 err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0], 549 err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
478 sizeof(struct i387_fxsave_struct)); 550 size);
479 /* mxcsr reserved bits must be masked to zero for security reasons */ 551 /* mxcsr reserved bits must be masked to zero for security reasons */
480 tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; 552 tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
481 if (err || __copy_from_user(&env, buf, sizeof(env))) 553 if (err || __copy_from_user(&env, buf, sizeof(env)))
@@ -485,14 +557,69 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
485 return 0; 557 return 0;
486} 558}
487 559
488int restore_i387_ia32(struct _fpstate_ia32 __user *buf) 560static int restore_i387_xsave(void __user *buf)
561{
562 struct _fpx_sw_bytes fx_sw_user;
563 struct _fpstate_ia32 __user *fx_user =
564 ((struct _fpstate_ia32 __user *) buf);
565 struct i387_fxsave_struct __user *fx =
566 (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
567 struct xsave_hdr_struct *xsave_hdr =
568 &current->thread.xstate->xsave.xsave_hdr;
569 u64 mask;
570 int err;
571
572 if (check_for_xstate(fx, buf, &fx_sw_user))
573 goto fx_only;
574
575 mask = fx_sw_user.xstate_bv;
576
577 err = restore_i387_fxsave(buf, fx_sw_user.xstate_size);
578
579 xsave_hdr->xstate_bv &= pcntxt_mask;
580 /*
581 * These bits must be zero.
582 */
583 xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;
584
585 /*
586 * Initialize the state that is not present in the memory layout
587 * but is enabled by the OS.
588 */
589 mask = ~(pcntxt_mask & ~mask);
590 xsave_hdr->xstate_bv &= mask;
591
592 return err;
593fx_only:
594 /*
595 * Couldn't find the extended state information in the memory
596 * layout. Restore the FP/SSE state and initialize the other
597 * extended state enabled by the OS.
598 */
599 xsave_hdr->xstate_bv = XSTATE_FPSSE;
600 return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct));
601}
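
The two-step mask dance above is subtle: the first AND drops bits the OS never enabled, and the second clears any OS-enabled state the user buffer did not carry, so xrstor re-initializes those components instead of loading stale register contents. A worked example with illustrative bit assignments:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative bits: bit0 FP, bit1 SSE, bit2 YMM */
	uint64_t pcntxt_mask = 0x7;	/* states the OS has enabled */
	uint64_t user_bv     = 0x3;	/* states present in user buffer */
	uint64_t xstate_bv   = 0x7;	/* header bits in the kernel image */

	xstate_bv &= pcntxt_mask;	/* drop unsupported bits */

	/* clear OS-enabled states the user buffer did not carry */
	uint64_t keep = ~(pcntxt_mask & ~user_bv);
	xstate_bv &= keep;

	printf("xstate_bv = %#llx\n",	/* 0x3: YMM gets re-initialized */
	       (unsigned long long)xstate_bv);
	return 0;
}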
602
603int restore_i387_xstate_ia32(void __user *buf)
489{ 604{
490 int err; 605 int err;
491 struct task_struct *tsk = current; 606 struct task_struct *tsk = current;
607 struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
492 608
493 if (HAVE_HWFP) 609 if (HAVE_HWFP)
494 clear_fpu(tsk); 610 clear_fpu(tsk);
495 611
612 if (!buf) {
613 if (used_math()) {
614 clear_fpu(tsk);
615 clear_used_math();
616 }
617
618 return 0;
619 } else
620 if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size))
621 return -EACCES;
622
496 if (!used_math()) { 623 if (!used_math()) {
497 err = init_fpu(tsk); 624 err = init_fpu(tsk);
498 if (err) 625 if (err)
@@ -500,14 +627,17 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
500 } 627 }
501 628
502 if (HAVE_HWFP) { 629 if (HAVE_HWFP) {
503 if (cpu_has_fxsr) 630 if (cpu_has_xsave)
504 err = restore_i387_fxsave(buf); 631 err = restore_i387_xsave(buf);
632 else if (cpu_has_fxsr)
633 err = restore_i387_fxsave(fp, sizeof(struct
634 i387_fxsave_struct));
505 else 635 else
506 err = restore_i387_fsave(buf); 636 err = restore_i387_fsave(fp);
507 } else { 637 } else {
508 err = fpregs_soft_set(current, NULL, 638 err = fpregs_soft_set(current, NULL,
509 0, sizeof(struct user_i387_ia32_struct), 639 0, sizeof(struct user_i387_ia32_struct),
510 NULL, buf) != 0; 640 NULL, fp) != 0;
511 } 641 }
512 set_used_math(); 642 set_used_math();
513 643
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index dc92b49d9204..4b8a53d841f7 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -282,6 +282,30 @@ static int __init i8259A_init_sysfs(void)
282 282
283device_initcall(i8259A_init_sysfs); 283device_initcall(i8259A_init_sysfs);
284 284
285void mask_8259A(void)
286{
287 unsigned long flags;
288
289 spin_lock_irqsave(&i8259A_lock, flags);
290
291 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
292 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
293
294 spin_unlock_irqrestore(&i8259A_lock, flags);
295}
296
297void unmask_8259A(void)
298{
299 unsigned long flags;
300
301 spin_lock_irqsave(&i8259A_lock, flags);
302
303 outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
304 outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
305
306 spin_unlock_irqrestore(&i8259A_lock, flags);
307}
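
The IMR writes above are one byte per PIC with one bit per IRQ line: bit n of the master IMR masks IRQ n, bit n of the slave IMR masks IRQ 8+n, and 0xff on both ports masks everything. A sketch of just the bookkeeping (no port I/O; ports in the comments are the conventional 0x21/0xa1):

#include <stdio.h>

static unsigned char cached_master_mask, cached_slave_mask;

static void mask_irq(int irq)
{
	if (irq < 8)
		cached_master_mask |= 1 << irq;	     /* outb(.., 0x21) */
	else
		cached_slave_mask |= 1 << (irq - 8); /* outb(.., 0xa1) */
}

int main(void)
{
	mask_irq(3);
	mask_irq(12);
	printf("master=%#x slave=%#x\n",
	       cached_master_mask, cached_slave_mask); /* 0x8, 0x10 */
	return 0;
}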
308
285void init_8259A(int auto_eoi) 309void init_8259A(int auto_eoi)
286{ 310{
287 unsigned long flags; 311 unsigned long flags;
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic.c
index 61a83b70c18f..b764d7429c61 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic.c
@@ -27,16 +27,21 @@
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/mc146818rtc.h> 29#include <linux/mc146818rtc.h>
30#include <linux/compiler.h>
30#include <linux/acpi.h> 31#include <linux/acpi.h>
32#include <linux/module.h>
31#include <linux/sysdev.h> 33#include <linux/sysdev.h>
32#include <linux/msi.h> 34#include <linux/msi.h>
33#include <linux/htirq.h> 35#include <linux/htirq.h>
34#include <linux/dmar.h> 36#include <linux/freezer.h>
35#include <linux/jiffies.h> 37#include <linux/kthread.h>
38#include <linux/jiffies.h> /* time_after() */
36#ifdef CONFIG_ACPI 39#ifdef CONFIG_ACPI
37#include <acpi/acpi_bus.h> 40#include <acpi/acpi_bus.h>
38#endif 41#endif
39#include <linux/bootmem.h> 42#include <linux/bootmem.h>
43#include <linux/dmar.h>
44#include <linux/hpet.h>
40 45
41#include <asm/idle.h> 46#include <asm/idle.h>
42#include <asm/io.h> 47#include <asm/io.h>
@@ -45,60 +50,28 @@
45#include <asm/proto.h> 50#include <asm/proto.h>
46#include <asm/acpi.h> 51#include <asm/acpi.h>
47#include <asm/dma.h> 52#include <asm/dma.h>
53#include <asm/timer.h>
48#include <asm/i8259.h> 54#include <asm/i8259.h>
49#include <asm/nmi.h> 55#include <asm/nmi.h>
50#include <asm/msidef.h> 56#include <asm/msidef.h>
51#include <asm/hypertransport.h> 57#include <asm/hypertransport.h>
58#include <asm/setup.h>
59#include <asm/irq_remapping.h>
60#include <asm/hpet.h>
61#include <asm/uv/uv_hub.h>
62#include <asm/uv/uv_irq.h>
52 63
53#include <mach_ipi.h> 64#include <mach_ipi.h>
54#include <mach_apic.h> 65#include <mach_apic.h>
66#include <mach_apicdef.h>
55 67
56struct irq_cfg { 68#define __apicdebuginit(type) static type __init
57 cpumask_t domain;
58 cpumask_t old_domain;
59 unsigned move_cleanup_count;
60 u8 vector;
61 u8 move_in_progress : 1;
62};
63
64/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
65static struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
66 [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
67 [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
68 [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
69 [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
70 [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
71 [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
72 [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
73 [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
74 [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
75 [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
76 [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
77 [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
78 [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
79 [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
80 [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
81 [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
82};
83
84static int assign_irq_vector(int irq, cpumask_t mask);
85 69
86int first_system_vector = 0xfe; 70/*
87 71 * Is the SiS APIC rmw bug present ?
88char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE}; 72 * -1 = don't know, 0 = no, 1 = yes
89 73 */
90#define __apicdebuginit __init 74int sis_apic_bug = -1;
91
92int sis_apic_bug; /* not actually supported, dummy for compile */
93
94static int no_timer_check;
95
96static int disable_timer_pin_1 __initdata;
97
98int timer_through_8259 __initdata;
99
100/* Where if anywhere is the i8259 connect in external int mode */
101static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
102 75
103static DEFINE_SPINLOCK(ioapic_lock); 76static DEFINE_SPINLOCK(ioapic_lock);
104static DEFINE_SPINLOCK(vector_lock); 77static DEFINE_SPINLOCK(vector_lock);
@@ -118,11 +91,69 @@ struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
118/* # of MP IRQ source entries */ 91/* # of MP IRQ source entries */
119int mp_irq_entries; 92int mp_irq_entries;
120 93
94#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
95int mp_bus_id_to_type[MAX_MP_BUSSES];
96#endif
97
121DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); 98DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
122 99
100int skip_ioapic_setup;
101
102static int __init parse_noapic(char *str)
103{
104 /* disable IO-APIC */
105 disable_ioapic_setup();
106 return 0;
107}
108early_param("noapic", parse_noapic);
109
110struct irq_pin_list;
111struct irq_cfg {
112 unsigned int irq;
113 struct irq_pin_list *irq_2_pin;
114 cpumask_t domain;
115 cpumask_t old_domain;
116 unsigned move_cleanup_count;
117 u8 vector;
118 u8 move_in_progress : 1;
119};
120
121/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
122static struct irq_cfg irq_cfgx[NR_IRQS] = {
123 [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
124 [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
125 [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
126 [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
127 [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
128 [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
129 [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
130 [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
131 [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
132 [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
133 [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
134 [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
135 [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
136 [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
137 [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
138 [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
139};
140
141#define for_each_irq_cfg(irq, cfg) \
142 for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)
143
144static struct irq_cfg *irq_cfg(unsigned int irq)
145{
146 return irq < nr_irqs ? irq_cfgx + irq : NULL;
147}
148
149static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
150{
151 return irq_cfg(irq);
152}
153
123/* 154/*
124 * Rough estimation of how many shared IRQs there are, can 155 * Rough estimation of how many shared IRQs there are, can be changed
125 * be changed anytime. 156 * anytime.
126 */ 157 */
127#define MAX_PLUS_SHARED_IRQS NR_IRQS 158#define MAX_PLUS_SHARED_IRQS NR_IRQS
128#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS) 159#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
@@ -134,9 +165,36 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
134 * between pins and IRQs. 165 * between pins and IRQs.
135 */ 166 */
136 167
137static struct irq_pin_list { 168struct irq_pin_list {
138 short apic, pin, next; 169 int apic, pin;
139} irq_2_pin[PIN_MAP_SIZE]; 170 struct irq_pin_list *next;
171};
172
173static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
174static struct irq_pin_list *irq_2_pin_ptr;
175
176static void __init irq_2_pin_init(void)
177{
178 struct irq_pin_list *pin = irq_2_pin_head;
179 int i;
180
181 for (i = 1; i < PIN_MAP_SIZE; i++)
182 pin[i-1].next = &pin[i];
183
184 irq_2_pin_ptr = &pin[0];
185}
186
187static struct irq_pin_list *get_one_free_irq_2_pin(void)
188{
189 struct irq_pin_list *pin = irq_2_pin_ptr;
190
191 if (!pin)
192 panic("can not get more irq_2_pin\n");
193
194 irq_2_pin_ptr = pin->next;
195 pin->next = NULL;
196 return pin;
197}
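
The pair of functions above carves a singly linked free list out of a static array once at boot, then hands out nodes with no allocator at all. A standalone sketch of the same pattern (pool size and names illustrative):

#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 8

struct node {
	int apic, pin;
	struct node *next;
};

static struct node pool[POOL_SIZE];
static struct node *free_head;

static void pool_init(void)
{
	for (int i = 1; i < POOL_SIZE; i++)
		pool[i - 1].next = &pool[i];
	free_head = &pool[0];	/* pool[POOL_SIZE-1].next stays NULL */
}

static struct node *pool_get(void)
{
	struct node *n = free_head;

	if (!n) {		/* the kernel version panics here */
		fprintf(stderr, "pool exhausted\n");
		exit(1);
	}
	free_head = n->next;
	n->next = NULL;
	return n;
}

int main(void)
{
	pool_init();
	printf("first node at index %td\n", pool_get() - pool); /* 0 */
	printf("next  node at index %td\n", pool_get() - pool); /* 1 */
	return 0;
}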
140 198
141struct io_apic { 199struct io_apic {
142 unsigned int index; 200 unsigned int index;
@@ -167,10 +225,15 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned i
167/* 225/*
168 * Re-write a value: to be used for read-modify-write 226 * Re-write a value: to be used for read-modify-write
169 * cycles where the read already set up the index register. 227 * cycles where the read already set up the index register.
228 *
229 * Older SiS APICs require that we rewrite the index register.
170 */ 230 */
171static inline void io_apic_modify(unsigned int apic, unsigned int value) 231static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
172{ 232{
173 struct io_apic __iomem *io_apic = io_apic_base(apic); 233 struct io_apic __iomem *io_apic = io_apic_base(apic);
234
235 if (sis_apic_bug)
236 writel(reg, &io_apic->index);
174 writel(value, &io_apic->data); 237 writel(value, &io_apic->data);
175} 238}
176 239
@@ -178,16 +241,17 @@ static bool io_apic_level_ack_pending(unsigned int irq)
178{ 241{
179 struct irq_pin_list *entry; 242 struct irq_pin_list *entry;
180 unsigned long flags; 243 unsigned long flags;
244 struct irq_cfg *cfg = irq_cfg(irq);
181 245
182 spin_lock_irqsave(&ioapic_lock, flags); 246 spin_lock_irqsave(&ioapic_lock, flags);
183 entry = irq_2_pin + irq; 247 entry = cfg->irq_2_pin;
184 for (;;) { 248 for (;;) {
185 unsigned int reg; 249 unsigned int reg;
186 int pin; 250 int pin;
187 251
188 pin = entry->pin; 252 if (!entry)
189 if (pin == -1)
190 break; 253 break;
254 pin = entry->pin;
191 reg = io_apic_read(entry->apic, 0x10 + pin*2); 255 reg = io_apic_read(entry->apic, 0x10 + pin*2);
192 /* Is the remote IRR bit set? */ 256 /* Is the remote IRR bit set? */
193 if (reg & IO_APIC_REDIR_REMOTE_IRR) { 257 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
@@ -196,45 +260,13 @@ static bool io_apic_level_ack_pending(unsigned int irq)
196 } 260 }
197 if (!entry->next) 261 if (!entry->next)
198 break; 262 break;
199 entry = irq_2_pin + entry->next; 263 entry = entry->next;
200 } 264 }
201 spin_unlock_irqrestore(&ioapic_lock, flags); 265 spin_unlock_irqrestore(&ioapic_lock, flags);
202 266
203 return false; 267 return false;
204} 268}
205 269
206/*
207 * Synchronize the IO-APIC and the CPU by doing
208 * a dummy read from the IO-APIC
209 */
210static inline void io_apic_sync(unsigned int apic)
211{
212 struct io_apic __iomem *io_apic = io_apic_base(apic);
213 readl(&io_apic->data);
214}
215
216#define __DO_ACTION(R, ACTION, FINAL) \
217 \
218{ \
219 int pin; \
220 struct irq_pin_list *entry = irq_2_pin + irq; \
221 \
222 BUG_ON(irq >= NR_IRQS); \
223 for (;;) { \
224 unsigned int reg; \
225 pin = entry->pin; \
226 if (pin == -1) \
227 break; \
228 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
229 reg ACTION; \
230 io_apic_modify(entry->apic, reg); \
231 FINAL; \
232 if (!entry->next) \
233 break; \
234 entry = irq_2_pin + entry->next; \
235 } \
236}
237
238union entry_union { 270union entry_union {
239 struct { u32 w1, w2; }; 271 struct { u32 w1, w2; };
240 struct IO_APIC_route_entry entry; 272 struct IO_APIC_route_entry entry;
@@ -294,54 +326,71 @@ static void ioapic_mask_entry(int apic, int pin)
294static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector) 326static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
295{ 327{
296 int apic, pin; 328 int apic, pin;
297 struct irq_pin_list *entry = irq_2_pin + irq; 329 struct irq_cfg *cfg;
330 struct irq_pin_list *entry;
298 331
299 BUG_ON(irq >= NR_IRQS); 332 cfg = irq_cfg(irq);
333 entry = cfg->irq_2_pin;
300 for (;;) { 334 for (;;) {
301 unsigned int reg; 335 unsigned int reg;
336
337 if (!entry)
338 break;
339
302 apic = entry->apic; 340 apic = entry->apic;
303 pin = entry->pin; 341 pin = entry->pin;
304 if (pin == -1) 342#ifdef CONFIG_INTR_REMAP
305 break; 343 /*
344 * With interrupt-remapping, destination information comes
345 * from interrupt-remapping table entry.
346 */
347 if (!irq_remapped(irq))
348 io_apic_write(apic, 0x11 + pin*2, dest);
349#else
306 io_apic_write(apic, 0x11 + pin*2, dest); 350 io_apic_write(apic, 0x11 + pin*2, dest);
351#endif
307 reg = io_apic_read(apic, 0x10 + pin*2); 352 reg = io_apic_read(apic, 0x10 + pin*2);
308 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 353 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
309 reg |= vector; 354 reg |= vector;
310 io_apic_modify(apic, reg); 355 io_apic_modify(apic, 0x10 + pin*2, reg);
311 if (!entry->next) 356 if (!entry->next)
312 break; 357 break;
313 entry = irq_2_pin + entry->next; 358 entry = entry->next;
314 } 359 }
315} 360}
316 361
362static int assign_irq_vector(int irq, cpumask_t mask);
363
317static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) 364static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
318{ 365{
319 struct irq_cfg *cfg = irq_cfg + irq; 366 struct irq_cfg *cfg;
320 unsigned long flags; 367 unsigned long flags;
321 unsigned int dest; 368 unsigned int dest;
322 cpumask_t tmp; 369 cpumask_t tmp;
370 struct irq_desc *desc;
323 371
324 cpus_and(tmp, mask, cpu_online_map); 372 cpus_and(tmp, mask, cpu_online_map);
325 if (cpus_empty(tmp)) 373 if (cpus_empty(tmp))
326 return; 374 return;
327 375
376 cfg = irq_cfg(irq);
328 if (assign_irq_vector(irq, mask)) 377 if (assign_irq_vector(irq, mask))
329 return; 378 return;
330 379
331 cpus_and(tmp, cfg->domain, mask); 380 cpus_and(tmp, cfg->domain, mask);
332 dest = cpu_mask_to_apicid(tmp); 381 dest = cpu_mask_to_apicid(tmp);
333
334 /* 382 /*
335 * Only the high 8 bits are valid. 383 * Only the high 8 bits are valid.
336 */ 384 */
337 dest = SET_APIC_LOGICAL_ID(dest); 385 dest = SET_APIC_LOGICAL_ID(dest);
338 386
387 desc = irq_to_desc(irq);
339 spin_lock_irqsave(&ioapic_lock, flags); 388 spin_lock_irqsave(&ioapic_lock, flags);
340 __target_IO_APIC_irq(irq, dest, cfg->vector); 389 __target_IO_APIC_irq(irq, dest, cfg->vector);
341 irq_desc[irq].affinity = mask; 390 desc->affinity = mask;
342 spin_unlock_irqrestore(&ioapic_lock, flags); 391 spin_unlock_irqrestore(&ioapic_lock, flags);
343} 392}
344#endif 393#endif /* CONFIG_SMP */
345 394
346/* 395/*
347 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are 396 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
@@ -350,19 +399,30 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
350 */ 399 */
351static void add_pin_to_irq(unsigned int irq, int apic, int pin) 400static void add_pin_to_irq(unsigned int irq, int apic, int pin)
352{ 401{
353 static int first_free_entry = NR_IRQS; 402 struct irq_cfg *cfg;
354 struct irq_pin_list *entry = irq_2_pin + irq; 403 struct irq_pin_list *entry;
404
405 /* first time to refer irq_cfg, so with new */
406 cfg = irq_cfg_alloc(irq);
407 entry = cfg->irq_2_pin;
408 if (!entry) {
409 entry = get_one_free_irq_2_pin();
410 cfg->irq_2_pin = entry;
411 entry->apic = apic;
412 entry->pin = pin;
413 return;
414 }
355 415
356 BUG_ON(irq >= NR_IRQS); 416 while (entry->next) {
357 while (entry->next) 417 /* not again, please */
358 entry = irq_2_pin + entry->next; 418 if (entry->apic == apic && entry->pin == pin)
419 return;
359 420
360 if (entry->pin != -1) { 421 entry = entry->next;
361 entry->next = first_free_entry;
362 entry = irq_2_pin + entry->next;
363 if (++first_free_entry >= PIN_MAP_SIZE)
364 panic("io_apic.c: ran out of irq_2_pin entries!");
365 } 422 }
423
424 entry->next = get_one_free_irq_2_pin();
425 entry = entry->next;
366 entry->apic = apic; 426 entry->apic = apic;
367 entry->pin = pin; 427 entry->pin = pin;
368} 428}
@@ -374,30 +434,86 @@ static void __init replace_pin_at_irq(unsigned int irq,
374 int oldapic, int oldpin, 434 int oldapic, int oldpin,
375 int newapic, int newpin) 435 int newapic, int newpin)
376{ 436{
377 struct irq_pin_list *entry = irq_2_pin + irq; 437 struct irq_cfg *cfg = irq_cfg(irq);
438 struct irq_pin_list *entry = cfg->irq_2_pin;
439 int replaced = 0;
378 440
379 while (1) { 441 while (entry) {
380 if (entry->apic == oldapic && entry->pin == oldpin) { 442 if (entry->apic == oldapic && entry->pin == oldpin) {
381 entry->apic = newapic; 443 entry->apic = newapic;
382 entry->pin = newpin; 444 entry->pin = newpin;
383 } 445 replaced = 1;
384 if (!entry->next) 446 /* every one is different, right? */
385 break; 447 break;
386 entry = irq_2_pin + entry->next; 448 }
449 entry = entry->next;
450 }
451
452 /* why? call replace before add? */
453 if (!replaced)
454 add_pin_to_irq(irq, newapic, newpin);
455}
456
457static inline void io_apic_modify_irq(unsigned int irq,
458 int mask_and, int mask_or,
459 void (*final)(struct irq_pin_list *entry))
460{
461 int pin;
462 struct irq_cfg *cfg;
463 struct irq_pin_list *entry;
464
465 cfg = irq_cfg(irq);
466 for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
467 unsigned int reg;
468 pin = entry->pin;
469 reg = io_apic_read(entry->apic, 0x10 + pin * 2);
470 reg &= mask_and;
471 reg |= mask_or;
472 io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
473 if (final)
474 final(entry);
387 } 475 }
388} 476}
389 477
478static void __unmask_IO_APIC_irq(unsigned int irq)
479{
480 io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL);
481}
390 482
391#define DO_ACTION(name,R,ACTION, FINAL) \ 483#ifdef CONFIG_X86_64
392 \ 484void io_apic_sync(struct irq_pin_list *entry)
393 static void name##_IO_APIC_irq (unsigned int irq) \ 485{
394 __DO_ACTION(R, ACTION, FINAL) 486 /*
487 * Synchronize the IO-APIC and the CPU by doing
488 * a dummy read from the IO-APIC
489 */
490 struct io_apic __iomem *io_apic;
491 io_apic = io_apic_base(entry->apic);
492 readl(&io_apic->data);
493}
395 494
396/* mask = 1 */ 495static void __mask_IO_APIC_irq(unsigned int irq)
397DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic)) 496{
497 io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
498}
499#else /* CONFIG_X86_32 */
500static void __mask_IO_APIC_irq(unsigned int irq)
501{
502 io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL);
503}
504
505static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
506{
507 io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER,
508 IO_APIC_REDIR_MASKED, NULL);
509}
398 510
399/* mask = 0 */ 511static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
400DO_ACTION(__unmask, 0, &= ~IO_APIC_REDIR_MASKED, ) 512{
513 io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED,
514 IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
515}
516#endif /* CONFIG_X86_32 */
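
io_apic_modify_irq() above replaces the old __DO_ACTION macro with a plain function parameterized by an AND mask, an OR mask, and an optional post-write hook. A standalone sketch of that shape (register layout and names illustrative):

#include <stdio.h>

#define REDIR_MASKED	0x00010000u
#define REDIR_LEVEL	0x00008000u

/* Stand-in for one RTE low word per pin. */
static unsigned rte[4] = { REDIR_LEVEL, 0, REDIR_LEVEL, 0 };

/* Same shape as io_apic_modify_irq(): clear the AND mask's zero
 * bits, set the OR mask's one bits, then run an optional hook. */
static void modify_pins(unsigned mask_and, unsigned mask_or,
			void (*final)(int pin))
{
	for (int pin = 0; pin < 4; pin++) {
		rte[pin] = (rte[pin] & mask_and) | mask_or;
		if (final)
			final(pin);
	}
}

static void sync_pin(int pin)	/* plays io_apic_sync()'s role */
{
	printf("synced pin %d -> %#x\n", pin, rte[pin]);
}

int main(void)
{
	modify_pins(~0u, REDIR_MASKED, sync_pin);	/* mask all */
	modify_pins(~REDIR_MASKED, 0, NULL);		/* unmask all */
	printf("rte[0]=%#x\n", rte[0]);	/* 0x8000: level bit preserved */
	return 0;
}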
401 517
402static void mask_IO_APIC_irq (unsigned int irq) 518static void mask_IO_APIC_irq (unsigned int irq)
403{ 519{
@@ -440,24 +556,145 @@ static void clear_IO_APIC (void)
440 clear_IO_APIC_pin(apic, pin); 556 clear_IO_APIC_pin(apic, pin);
441} 557}
442 558
443int skip_ioapic_setup; 559#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
444int ioapic_force; 560void send_IPI_self(int vector)
561{
562 unsigned int cfg;
445 563
446static int __init parse_noapic(char *str) 564 /*
565 * Wait for idle.
566 */
567 apic_wait_icr_idle();
568 cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
569 /*
570 * Send the IPI. The write to APIC_ICR fires this off.
571 */
572 apic_write(APIC_ICR, cfg);
573}
574#endif /* !CONFIG_SMP && CONFIG_X86_32*/
575
576#ifdef CONFIG_X86_32
577/*
578 * Support for broken MP BIOSes: enables hand-redirection of PIRQ0-7 to
579 * specific CPU-side IRQs.
580 */
581
582#define MAX_PIRQS 8
583static int pirq_entries [MAX_PIRQS];
584static int pirqs_enabled;
585
586static int __init ioapic_pirq_setup(char *str)
447{ 587{
448 disable_ioapic_setup(); 588 int i, max;
589 int ints[MAX_PIRQS+1];
590
591 get_options(str, ARRAY_SIZE(ints), ints);
592
593 for (i = 0; i < MAX_PIRQS; i++)
594 pirq_entries[i] = -1;
595
596 pirqs_enabled = 1;
597 apic_printk(APIC_VERBOSE, KERN_INFO
598 "PIRQ redirection, working around broken MP-BIOS.\n");
599 max = MAX_PIRQS;
600 if (ints[0] < MAX_PIRQS)
601 max = ints[0];
602
603 for (i = 0; i < max; i++) {
604 apic_printk(APIC_VERBOSE, KERN_DEBUG
605 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
606 /*
607 * PIRQs are mapped upside down, usually.
608 */
609 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
610 }
611 return 1;
612}
613
614__setup("pirq=", ioapic_pirq_setup);
615#endif /* CONFIG_X86_32 */
616
617#ifdef CONFIG_INTR_REMAP
618/* I/O APIC RTE contents at the OS boot up */
619static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
620
621/*
622 * Saves and masks all the unmasked IO-APIC RTE's
623 */
624int save_mask_IO_APIC_setup(void)
625{
626 union IO_APIC_reg_01 reg_01;
627 unsigned long flags;
628 int apic, pin;
629
630 /*
631 * The number of IO-APIC IRQ registers (== #pins):
632 */
633 for (apic = 0; apic < nr_ioapics; apic++) {
634 spin_lock_irqsave(&ioapic_lock, flags);
635 reg_01.raw = io_apic_read(apic, 1);
636 spin_unlock_irqrestore(&ioapic_lock, flags);
637 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
638 }
639
640 for (apic = 0; apic < nr_ioapics; apic++) {
641 early_ioapic_entries[apic] =
642 kzalloc(sizeof(struct IO_APIC_route_entry) *
643 nr_ioapic_registers[apic], GFP_KERNEL);
644 if (!early_ioapic_entries[apic])
645 goto nomem;
646 }
647
648 for (apic = 0; apic < nr_ioapics; apic++)
649 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
650 struct IO_APIC_route_entry entry;
651
652 entry = early_ioapic_entries[apic][pin] =
653 ioapic_read_entry(apic, pin);
654 if (!entry.mask) {
655 entry.mask = 1;
656 ioapic_write_entry(apic, pin, entry);
657 }
658 }
659
449 return 0; 660 return 0;
661
662nomem:
663 while (apic >= 0)
664 kfree(early_ioapic_entries[apic--]);
665 memset(early_ioapic_entries, 0,
666 ARRAY_SIZE(early_ioapic_entries));
667
668 return -ENOMEM;
450} 669}
451early_param("noapic", parse_noapic);
452 670
453/* Actually the next is obsolete, but keep it for paranoid reasons -AK */ 671void restore_IO_APIC_setup(void)
454static int __init disable_timer_pin_setup(char *arg)
455{ 672{
456 disable_timer_pin_1 = 1; 673 int apic, pin;
457 return 1; 674
675 for (apic = 0; apic < nr_ioapics; apic++) {
676 if (!early_ioapic_entries[apic])
677 break;
678 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
679 ioapic_write_entry(apic, pin,
680 early_ioapic_entries[apic][pin]);
681 kfree(early_ioapic_entries[apic]);
682 early_ioapic_entries[apic] = NULL;
683 }
458} 684}
459__setup("disable_timer_pin_1", disable_timer_pin_setup);
460 685
686void reinit_intr_remapped_IO_APIC(int intr_remapping)
687{
688 /*
689 * For now, do a plain restore of the previous settings.
690 * TBD: when the OS enables interrupt-remapping, the
691 * IO-APIC RTEs need to be set up to point to interrupt-remapping
692 * table entries. Until then, do a plain restore and wait for
693 * setup_IO_APIC_irqs() to do the proper initialization.
694 */
695 restore_IO_APIC_setup();
696}
697#endif
461 698
462/* 699/*
463 * Find the IRQ entry number of a certain pin. 700 * Find the IRQ entry number of a certain pin.
@@ -561,22 +798,54 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
561 best_guess = irq; 798 best_guess = irq;
562 } 799 }
563 } 800 }
564 BUG_ON(best_guess >= NR_IRQS);
565 return best_guess; 801 return best_guess;
566} 802}
567 803
804EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
805
806#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
807/*
808 * EISA Edge/Level control register, ELCR
809 */
810static int EISA_ELCR(unsigned int irq)
811{
812 if (irq < 16) {
813 unsigned int port = 0x4d0 + (irq >> 3);
814 return (inb(port) >> (irq & 7)) & 1;
815 }
816 apic_printk(APIC_VERBOSE, KERN_INFO
817 "Broken MPtable reports ISA irq %d\n", irq);
818 return 0;
819}
820
821#endif
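
The ELCR lookup above packs one trigger bit per ISA IRQ into two 8-bit registers at I/O ports 0x4d0 and 0x4d1: irq >> 3 selects the port, irq & 7 selects the bit. A worked example of the address math (no actual inb() here):

#include <stdio.h>

int main(void)
{
	for (int irq = 0; irq < 16; irq += 5) {
		unsigned port = 0x4d0 + (irq >> 3);
		unsigned bit = irq & 7;

		printf("irq %2d -> port %#x bit %u\n", irq, port, bit);
	}
	/* e.g. irq 10 -> port 0x4d1 bit 2 */
	return 0;
}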
822
568/* ISA interrupts are always polarity zero edge triggered, 823/* ISA interrupts are always polarity zero edge triggered,
569 * when listed as conforming in the MP table. */ 824 * when listed as conforming in the MP table. */
570 825
571#define default_ISA_trigger(idx) (0) 826#define default_ISA_trigger(idx) (0)
572#define default_ISA_polarity(idx) (0) 827#define default_ISA_polarity(idx) (0)
573 828
829/* EISA interrupts are always polarity zero and can be edge or level
830 * trigger depending on the ELCR value. If an interrupt is listed as
831 * EISA conforming in the MP table, that means its trigger type must
832 * be read in from the ELCR */
833
834#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
835#define default_EISA_polarity(idx) default_ISA_polarity(idx)
836
574/* PCI interrupts are always polarity one level triggered, 837/* PCI interrupts are always polarity one level triggered,
575 * when listed as conforming in the MP table. */ 838 * when listed as conforming in the MP table. */
576 839
577#define default_PCI_trigger(idx) (1) 840#define default_PCI_trigger(idx) (1)
578#define default_PCI_polarity(idx) (1) 841#define default_PCI_polarity(idx) (1)
579 842
843/* MCA interrupts are always polarity zero level triggered,
844 * when listed as conforming in the MP table. */
845
846#define default_MCA_trigger(idx) (1)
847#define default_MCA_polarity(idx) default_ISA_polarity(idx)
848
580static int MPBIOS_polarity(int idx) 849static int MPBIOS_polarity(int idx)
581{ 850{
582 int bus = mp_irqs[idx].mp_srcbus; 851 int bus = mp_irqs[idx].mp_srcbus;
@@ -634,6 +903,36 @@ static int MPBIOS_trigger(int idx)
634 trigger = default_ISA_trigger(idx); 903 trigger = default_ISA_trigger(idx);
635 else 904 else
636 trigger = default_PCI_trigger(idx); 905 trigger = default_PCI_trigger(idx);
906#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
907 switch (mp_bus_id_to_type[bus]) {
908 case MP_BUS_ISA: /* ISA pin */
909 {
910 /* set before the switch */
911 break;
912 }
913 case MP_BUS_EISA: /* EISA pin */
914 {
915 trigger = default_EISA_trigger(idx);
916 break;
917 }
918 case MP_BUS_PCI: /* PCI pin */
919 {
920 /* set before the switch */
921 break;
922 }
923 case MP_BUS_MCA: /* MCA pin */
924 {
925 trigger = default_MCA_trigger(idx);
926 break;
927 }
928 default:
929 {
930 printk(KERN_WARNING "broken BIOS!!\n");
931 trigger = 1;
932 break;
933 }
934 }
935#endif
637 break; 936 break;
638 case 1: /* edge */ 937 case 1: /* edge */
639 { 938 {
@@ -671,6 +970,7 @@ static inline int irq_trigger(int idx)
671 return MPBIOS_trigger(idx); 970 return MPBIOS_trigger(idx);
672} 971}
673 972
973int (*ioapic_renumber_irq)(int ioapic, int irq);
674static int pin_2_irq(int idx, int apic, int pin) 974static int pin_2_irq(int idx, int apic, int pin)
675{ 975{
676 int irq, i; 976 int irq, i;
@@ -692,8 +992,32 @@ static int pin_2_irq(int idx, int apic, int pin)
692 while (i < apic) 992 while (i < apic)
693 irq += nr_ioapic_registers[i++]; 993 irq += nr_ioapic_registers[i++];
694 irq += pin; 994 irq += pin;
995 /*
996 * For MPS mode, so far only needed by ES7000 platform
997 */
998 if (ioapic_renumber_irq)
999 irq = ioapic_renumber_irq(apic, irq);
695 } 1000 }
696 BUG_ON(irq >= NR_IRQS); 1001
1002#ifdef CONFIG_X86_32
1003 /*
1004 * PCI IRQ command line redirection. Yes, limits are hardcoded.
1005 */
1006 if ((pin >= 16) && (pin <= 23)) {
1007 if (pirq_entries[pin-16] != -1) {
1008 if (!pirq_entries[pin-16]) {
1009 apic_printk(APIC_VERBOSE, KERN_DEBUG
1010 "disabling PIRQ%d\n", pin-16);
1011 } else {
1012 irq = pirq_entries[pin-16];
1013 apic_printk(APIC_VERBOSE, KERN_DEBUG
1014 "using PIRQ%d -> IRQ %d\n",
1015 pin-16, irq);
1016 }
1017 }
1018 }
1019#endif
1020
697 return irq; 1021 return irq;
698} 1022}
699 1023
@@ -728,8 +1052,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
728 int cpu; 1052 int cpu;
729 struct irq_cfg *cfg; 1053 struct irq_cfg *cfg;
730 1054
731 BUG_ON((unsigned)irq >= NR_IRQS); 1055 cfg = irq_cfg(irq);
732 cfg = &irq_cfg[irq];
733 1056
734 /* Only try and allocate irqs on cpus that are present */ 1057 /* Only try and allocate irqs on cpus that are present */
735 cpus_and(mask, mask, cpu_online_map); 1058 cpus_and(mask, mask, cpu_online_map);
@@ -764,8 +1087,13 @@ next:
764 } 1087 }
765 if (unlikely(current_vector == vector)) 1088 if (unlikely(current_vector == vector))
766 continue; 1089 continue;
1090#ifdef CONFIG_X86_64
767 if (vector == IA32_SYSCALL_VECTOR) 1091 if (vector == IA32_SYSCALL_VECTOR)
768 goto next; 1092 goto next;
1093#else
1094 if (vector == SYSCALL_VECTOR)
1095 goto next;
1096#endif
769 for_each_cpu_mask_nr(new_cpu, new_mask) 1097 for_each_cpu_mask_nr(new_cpu, new_mask)
770 if (per_cpu(vector_irq, new_cpu)[vector] != -1) 1098 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
771 goto next; 1099 goto next;
@@ -802,8 +1130,7 @@ static void __clear_irq_vector(int irq)
802 cpumask_t mask; 1130 cpumask_t mask;
803 int cpu, vector; 1131 int cpu, vector;
804 1132
805 BUG_ON((unsigned)irq >= NR_IRQS); 1133 cfg = irq_cfg(irq);
806 cfg = &irq_cfg[irq];
807 BUG_ON(!cfg->vector); 1134 BUG_ON(!cfg->vector);
808 1135
809 vector = cfg->vector; 1136 vector = cfg->vector;
@@ -820,12 +1147,13 @@ void __setup_vector_irq(int cpu)
820 /* Initialize vector_irq on a new cpu */ 1147 /* Initialize vector_irq on a new cpu */
821 /* This function must be called with vector_lock held */ 1148 /* This function must be called with vector_lock held */
822 int irq, vector; 1149 int irq, vector;
1150 struct irq_cfg *cfg;
823 1151
824 /* Mark the inuse vectors */ 1152 /* Mark the inuse vectors */
825 for (irq = 0; irq < NR_IRQS; ++irq) { 1153 for_each_irq_cfg(irq, cfg) {
826 if (!cpu_isset(cpu, irq_cfg[irq].domain)) 1154 if (!cpu_isset(cpu, cfg->domain))
827 continue; 1155 continue;
828 vector = irq_cfg[irq].vector; 1156 vector = cfg->vector;
829 per_cpu(vector_irq, cpu)[vector] = irq; 1157 per_cpu(vector_irq, cpu)[vector] = irq;
830 } 1158 }
831 /* Mark the free vectors */ 1159 /* Mark the free vectors */
@@ -833,36 +1161,154 @@ void __setup_vector_irq(int cpu)
833 irq = per_cpu(vector_irq, cpu)[vector]; 1161 irq = per_cpu(vector_irq, cpu)[vector];
834 if (irq < 0) 1162 if (irq < 0)
835 continue; 1163 continue;
836 if (!cpu_isset(cpu, irq_cfg[irq].domain)) 1164
1165 cfg = irq_cfg(irq);
1166 if (!cpu_isset(cpu, cfg->domain))
837 per_cpu(vector_irq, cpu)[vector] = -1; 1167 per_cpu(vector_irq, cpu)[vector] = -1;
838 } 1168 }
839} 1169}
840 1170
841static struct irq_chip ioapic_chip; 1171static struct irq_chip ioapic_chip;
1172#ifdef CONFIG_INTR_REMAP
1173static struct irq_chip ir_ioapic_chip;
1174#endif
1175
1176#define IOAPIC_AUTO -1
1177#define IOAPIC_EDGE 0
1178#define IOAPIC_LEVEL 1
1179
1180#ifdef CONFIG_X86_32
1181static inline int IO_APIC_irq_trigger(int irq)
1182{
1183 int apic, idx, pin;
1184
1185 for (apic = 0; apic < nr_ioapics; apic++) {
1186 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1187 idx = find_irq_entry(apic, pin, mp_INT);
1188 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
1189 return irq_trigger(idx);
1190 }
1191 }
1192 /*
1193 * nonexistent IRQs default to edge triggered
1194 */
1195 return 0;
1196}
1197#else
1198static inline int IO_APIC_irq_trigger(int irq)
1199{
1200 return 1;
1201}
1202#endif
842 1203
843static void ioapic_register_intr(int irq, unsigned long trigger) 1204static void ioapic_register_intr(int irq, unsigned long trigger)
844{ 1205{
845 if (trigger) { 1206 struct irq_desc *desc;
846 irq_desc[irq].status |= IRQ_LEVEL; 1207
1208 desc = irq_to_desc(irq);
1209
1210 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1211 trigger == IOAPIC_LEVEL)
1212 desc->status |= IRQ_LEVEL;
1213 else
1214 desc->status &= ~IRQ_LEVEL;
1215
1216#ifdef CONFIG_INTR_REMAP
1217 if (irq_remapped(irq)) {
1218 desc->status |= IRQ_MOVE_PCNTXT;
1219 if (trigger)
1220 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1221 handle_fasteoi_irq,
1222 "fasteoi");
1223 else
1224 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1225 handle_edge_irq, "edge");
1226 return;
1227 }
1228#endif
1229 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1230 trigger == IOAPIC_LEVEL)
847 set_irq_chip_and_handler_name(irq, &ioapic_chip, 1231 set_irq_chip_and_handler_name(irq, &ioapic_chip,
848 handle_fasteoi_irq, "fasteoi"); 1232 handle_fasteoi_irq,
849 } else { 1233 "fasteoi");
850 irq_desc[irq].status &= ~IRQ_LEVEL; 1234 else
851 set_irq_chip_and_handler_name(irq, &ioapic_chip, 1235 set_irq_chip_and_handler_name(irq, &ioapic_chip,
852 handle_edge_irq, "edge"); 1236 handle_edge_irq, "edge");
1237}
1238
1239static int setup_ioapic_entry(int apic, int irq,
1240 struct IO_APIC_route_entry *entry,
1241 unsigned int destination, int trigger,
1242 int polarity, int vector)
1243{
1244 /*
1245 * add it to the IO-APIC irq-routing table:
1246 */
1247 memset(entry, 0, sizeof(*entry));
1248
1249#ifdef CONFIG_INTR_REMAP
1250 if (intr_remapping_enabled) {
1251 struct intel_iommu *iommu = map_ioapic_to_ir(apic);
1252 struct irte irte;
1253 struct IR_IO_APIC_route_entry *ir_entry =
1254 (struct IR_IO_APIC_route_entry *) entry;
1255 int index;
1256
1257 if (!iommu)
1258 panic("No mapping iommu for ioapic %d\n", apic);
1259
1260 index = alloc_irte(iommu, irq, 1);
1261 if (index < 0)
1262 panic("Failed to allocate IRTE for ioapic %d\n", apic);
1263
1264 memset(&irte, 0, sizeof(irte));
1265
1266 irte.present = 1;
1267 irte.dst_mode = INT_DEST_MODE;
1268 irte.trigger_mode = trigger;
1269 irte.dlvry_mode = INT_DELIVERY_MODE;
1270 irte.vector = vector;
1271 irte.dest_id = IRTE_DEST(destination);
1272
1273 modify_irte(irq, &irte);
1274
1275 ir_entry->index2 = (index >> 15) & 0x1;
1276 ir_entry->zero = 0;
1277 ir_entry->format = 1;
1278 ir_entry->index = (index & 0x7fff);
1279 } else
1280#endif
1281 {
1282 entry->delivery_mode = INT_DELIVERY_MODE;
1283 entry->dest_mode = INT_DEST_MODE;
1284 entry->dest = destination;
853 } 1285 }
1286
1287 entry->mask = 0; /* enable IRQ */
1288 entry->trigger = trigger;
1289 entry->polarity = polarity;
1290 entry->vector = vector;
1291
1292 /* Mask level triggered irqs.
1293 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1294 */
1295 if (trigger)
1296 entry->mask = 1;
1297 return 0;
854} 1298}
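/*
 * A self-contained sketch of the 16-bit IRTE index split done above for the
 * IR-IO-APIC route entry: bits 0-14 land in 'index', bit 15 in 'index2'.
 * The value below is made up; only the bit arithmetic mirrors the code.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int index = 0x9abc;			/* hypothetical alloc_irte() result */
	unsigned int index2 = (index >> 15) & 0x1;	/* top bit */
	unsigned int index_lo = index & 0x7fff;		/* low 15 bits */

	assert(((index2 << 15) | index_lo) == index);	/* split is lossless */
	printf("index2=%u index=0x%04x\n", index2, index_lo);
	return 0;
}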
855 1299
856static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, 1300static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
857 int trigger, int polarity) 1301 int trigger, int polarity)
858{ 1302{
859 struct irq_cfg *cfg = irq_cfg + irq; 1303 struct irq_cfg *cfg;
860 struct IO_APIC_route_entry entry; 1304 struct IO_APIC_route_entry entry;
861 cpumask_t mask; 1305 cpumask_t mask;
862 1306
863 if (!IO_APIC_IRQ(irq)) 1307 if (!IO_APIC_IRQ(irq))
864 return; 1308 return;
865 1309
1310 cfg = irq_cfg(irq);
1311
866 mask = TARGET_CPUS; 1312 mask = TARGET_CPUS;
867 if (assign_irq_vector(irq, mask)) 1313 if (assign_irq_vector(irq, mask))
868 return; 1314 return;
@@ -875,24 +1321,15 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
875 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector, 1321 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
876 irq, trigger, polarity); 1322 irq, trigger, polarity);
877 1323
878 /*
879 * add it to the IO-APIC irq-routing table:
880 */
881 memset(&entry,0,sizeof(entry));
882
883 entry.delivery_mode = INT_DELIVERY_MODE;
884 entry.dest_mode = INT_DEST_MODE;
885 entry.dest = cpu_mask_to_apicid(mask);
886 entry.mask = 0; /* enable IRQ */
887 entry.trigger = trigger;
888 entry.polarity = polarity;
889 entry.vector = cfg->vector;
890 1324
891 /* Mask level triggered irqs. 1325 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
892 * Use IRQ_DELAYED_DISABLE for edge triggered irqs. 1326 cpu_mask_to_apicid(mask), trigger, polarity,
893 */ 1327 cfg->vector)) {
894 if (trigger) 1328 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
895 entry.mask = 1; 1329 mp_ioapics[apic].mp_apicid, pin);
1330 __clear_irq_vector(irq);
1331 return;
1332 }
896 1333
897 ioapic_register_intr(irq, trigger); 1334 ioapic_register_intr(irq, trigger);
898 if (irq < 16) 1335 if (irq < 16)
@@ -903,37 +1340,49 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
903 1340
904static void __init setup_IO_APIC_irqs(void) 1341static void __init setup_IO_APIC_irqs(void)
905{ 1342{
906 int apic, pin, idx, irq, first_notcon = 1; 1343 int apic, pin, idx, irq;
1344 int notcon = 0;
907 1345
908 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1346 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
909 1347
910 for (apic = 0; apic < nr_ioapics; apic++) { 1348 for (apic = 0; apic < nr_ioapics; apic++) {
911 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 1349 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
912
913 idx = find_irq_entry(apic,pin,mp_INT);
914 if (idx == -1) {
915 if (first_notcon) {
916 apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
917 first_notcon = 0;
918 } else
919 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
920 continue;
921 }
922 if (!first_notcon) {
923 apic_printk(APIC_VERBOSE, " not connected.\n");
924 first_notcon = 1;
925 }
926 1350
927 irq = pin_2_irq(idx, apic, pin); 1351 idx = find_irq_entry(apic, pin, mp_INT);
928 add_pin_to_irq(irq, apic, pin); 1352 if (idx == -1) {
1353 if (!notcon) {
1354 notcon = 1;
1355 apic_printk(APIC_VERBOSE,
1356 KERN_DEBUG " %d-%d",
1357 mp_ioapics[apic].mp_apicid,
1358 pin);
1359 } else
1360 apic_printk(APIC_VERBOSE, " %d-%d",
1361 mp_ioapics[apic].mp_apicid,
1362 pin);
1363 continue;
1364 }
1365 if (notcon) {
1366 apic_printk(APIC_VERBOSE,
1367 " (apicid-pin) not connected\n");
1368 notcon = 0;
1369 }
929 1370
930 setup_IO_APIC_irq(apic, pin, irq, 1371 irq = pin_2_irq(idx, apic, pin);
931 irq_trigger(idx), irq_polarity(idx)); 1372#ifdef CONFIG_X86_32
932 } 1373 if (multi_timer_check(apic, irq))
1374 continue;
1375#endif
1376 add_pin_to_irq(irq, apic, pin);
1377
1378 setup_IO_APIC_irq(apic, pin, irq,
1379 irq_trigger(idx), irq_polarity(idx));
1380 }
933 } 1381 }
934 1382
935 if (!first_notcon) 1383 if (notcon)
936 apic_printk(APIC_VERBOSE, " not connected.\n"); 1384 apic_printk(APIC_VERBOSE,
1385 " (apicid-pin) not connected\n");
937} 1386}
938 1387
939/* 1388/*
@@ -944,6 +1393,11 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
944{ 1393{
945 struct IO_APIC_route_entry entry; 1394 struct IO_APIC_route_entry entry;
946 1395
1396#ifdef CONFIG_INTR_REMAP
1397 if (intr_remapping_enabled)
1398 return;
1399#endif
1400
947 memset(&entry, 0, sizeof(entry)); 1401 memset(&entry, 0, sizeof(entry));
948 1402
949 /* 1403 /*
@@ -970,13 +1424,17 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
970 ioapic_write_entry(apic, pin, entry); 1424 ioapic_write_entry(apic, pin, entry);
971} 1425}
972 1426
973void __apicdebuginit print_IO_APIC(void) 1427
1428__apicdebuginit(void) print_IO_APIC(void)
974{ 1429{
975 int apic, i; 1430 int apic, i;
976 union IO_APIC_reg_00 reg_00; 1431 union IO_APIC_reg_00 reg_00;
977 union IO_APIC_reg_01 reg_01; 1432 union IO_APIC_reg_01 reg_01;
978 union IO_APIC_reg_02 reg_02; 1433 union IO_APIC_reg_02 reg_02;
1434 union IO_APIC_reg_03 reg_03;
979 unsigned long flags; 1435 unsigned long flags;
1436 struct irq_cfg *cfg;
1437 unsigned int irq;
980 1438
981 if (apic_verbosity == APIC_QUIET) 1439 if (apic_verbosity == APIC_QUIET)
982 return; 1440 return;
@@ -999,12 +1457,16 @@ void __apicdebuginit print_IO_APIC(void)
999 reg_01.raw = io_apic_read(apic, 1); 1457 reg_01.raw = io_apic_read(apic, 1);
1000 if (reg_01.bits.version >= 0x10) 1458 if (reg_01.bits.version >= 0x10)
1001 reg_02.raw = io_apic_read(apic, 2); 1459 reg_02.raw = io_apic_read(apic, 2);
1460 if (reg_01.bits.version >= 0x20)
1461 reg_03.raw = io_apic_read(apic, 3);
1002 spin_unlock_irqrestore(&ioapic_lock, flags); 1462 spin_unlock_irqrestore(&ioapic_lock, flags);
1003 1463
1004 printk("\n"); 1464 printk("\n");
1005 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid); 1465 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1006 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); 1466 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1007 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); 1467 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1468 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1469 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1008 1470
1009 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01); 1471 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1010 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); 1472 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
@@ -1012,11 +1474,27 @@ void __apicdebuginit print_IO_APIC(void)
1012 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); 1474 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1013 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); 1475 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1014 1476
1015 if (reg_01.bits.version >= 0x10) { 1477 /*
1478 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
 1479 * but reading reg_02 returns the previously read register
 1480 * value, so ignore it if reg_02 == reg_01.
1481 */
1482 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1016 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); 1483 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1017 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); 1484 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1018 } 1485 }
1019 1486
1487 /*
1488 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
 1489 * or reg_03, but reading reg_0[23] returns the previously read
 1490 * register value, so ignore it if reg_03 == reg_0[12].
1491 */
1492 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1493 reg_03.raw != reg_01.raw) {
1494 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1495 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1496 }
1497
1020 printk(KERN_DEBUG ".... IRQ redirection table:\n"); 1498 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1021 1499
1022 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" 1500 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
@@ -1045,16 +1523,16 @@ void __apicdebuginit print_IO_APIC(void)
1045 } 1523 }
1046 } 1524 }
1047 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1525 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1048 for (i = 0; i < NR_IRQS; i++) { 1526 for_each_irq_cfg(irq, cfg) {
1049 struct irq_pin_list *entry = irq_2_pin + i; 1527 struct irq_pin_list *entry = cfg->irq_2_pin;
1050 if (entry->pin < 0) 1528 if (!entry)
1051 continue; 1529 continue;
1052 printk(KERN_DEBUG "IRQ%d ", i); 1530 printk(KERN_DEBUG "IRQ%d ", irq);
1053 for (;;) { 1531 for (;;) {
1054 printk("-> %d:%d", entry->apic, entry->pin); 1532 printk("-> %d:%d", entry->apic, entry->pin);
1055 if (!entry->next) 1533 if (!entry->next)
1056 break; 1534 break;
1057 entry = irq_2_pin + entry->next; 1535 entry = entry->next;
1058 } 1536 }
1059 printk("\n"); 1537 printk("\n");
1060 } 1538 }
@@ -1064,9 +1542,7 @@ void __apicdebuginit print_IO_APIC(void)
1064 return; 1542 return;
1065} 1543}
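/*
 * A userspace model of the irq_2_pin walk above after the rework: each
 * irq_cfg now carries a NULL-terminated singly linked list of (apic, pin)
 * pairs instead of chaining through indices in a flat array. Illustrative
 * only; the struct below loosely mirrors the kernel's irq_pin_list.
 */
#include <stdio.h>

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

int main(void)
{
	struct irq_pin_list second = { 1, 5, NULL };	/* pin 1:5, end of list */
	struct irq_pin_list first = { 0, 2, &second };	/* pin 0:2 -> 1:5 */
	struct irq_pin_list *entry;

	printf("IRQ9 ");
	for (entry = &first; entry; entry = entry->next)
		printf("-> %d:%d", entry->apic, entry->pin);
	printf("\n");
	return 0;
}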
1066 1544
1067#if 0 1545__apicdebuginit(void) print_APIC_bitfield(int base)
1068
1069static __apicdebuginit void print_APIC_bitfield (int base)
1070{ 1546{
1071 unsigned int v; 1547 unsigned int v;
1072 int i, j; 1548 int i, j;
@@ -1087,9 +1563,10 @@ static __apicdebuginit void print_APIC_bitfield (int base)
1087 } 1563 }
1088} 1564}
1089 1565
1090void __apicdebuginit print_local_APIC(void * dummy) 1566__apicdebuginit(void) print_local_APIC(void *dummy)
1091{ 1567{
1092 unsigned int v, ver, maxlvt; 1568 unsigned int v, ver, maxlvt;
1569 u64 icr;
1093 1570
1094 if (apic_verbosity == APIC_QUIET) 1571 if (apic_verbosity == APIC_QUIET)
1095 return; 1572 return;
@@ -1097,7 +1574,7 @@ void __apicdebuginit print_local_APIC(void * dummy)
1097 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", 1574 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1098 smp_processor_id(), hard_smp_processor_id()); 1575 smp_processor_id(), hard_smp_processor_id());
1099 v = apic_read(APIC_ID); 1576 v = apic_read(APIC_ID);
1100 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(read_apic_id())); 1577 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1101 v = apic_read(APIC_LVR); 1578 v = apic_read(APIC_LVR);
1102 printk(KERN_INFO "... APIC VERSION: %08x\n", v); 1579 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1103 ver = GET_APIC_VERSION(v); 1580 ver = GET_APIC_VERSION(v);
@@ -1106,20 +1583,31 @@ void __apicdebuginit print_local_APIC(void * dummy)
1106 v = apic_read(APIC_TASKPRI); 1583 v = apic_read(APIC_TASKPRI);
1107 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); 1584 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1108 1585
1109 v = apic_read(APIC_ARBPRI); 1586 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1110 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v, 1587 if (!APIC_XAPIC(ver)) {
1111 v & APIC_ARBPRI_MASK); 1588 v = apic_read(APIC_ARBPRI);
1112 v = apic_read(APIC_PROCPRI); 1589 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1113 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v); 1590 v & APIC_ARBPRI_MASK);
1591 }
1592 v = apic_read(APIC_PROCPRI);
1593 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1594 }
1595
1596 /*
 1597 * Remote read is supported only by the 82489DX and the local APIC
 1598 * of Pentium processors.
1599 */
1600 if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
1601 v = apic_read(APIC_RRR);
1602 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1603 }
1114 1604
1115 v = apic_read(APIC_EOI);
1116 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1117 v = apic_read(APIC_RRR);
1118 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1119 v = apic_read(APIC_LDR); 1605 v = apic_read(APIC_LDR);
1120 printk(KERN_DEBUG "... APIC LDR: %08x\n", v); 1606 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1121 v = apic_read(APIC_DFR); 1607 if (!x2apic_enabled()) {
1122 printk(KERN_DEBUG "... APIC DFR: %08x\n", v); 1608 v = apic_read(APIC_DFR);
1609 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1610 }
1123 v = apic_read(APIC_SPIV); 1611 v = apic_read(APIC_SPIV);
1124 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v); 1612 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1125 1613
@@ -1130,13 +1618,17 @@ void __apicdebuginit print_local_APIC(void * dummy)
1130 printk(KERN_DEBUG "... APIC IRR field:\n"); 1618 printk(KERN_DEBUG "... APIC IRR field:\n");
1131 print_APIC_bitfield(APIC_IRR); 1619 print_APIC_bitfield(APIC_IRR);
1132 1620
1133 v = apic_read(APIC_ESR); 1621 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1134 printk(KERN_DEBUG "... APIC ESR: %08x\n", v); 1622 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1623 apic_write(APIC_ESR, 0);
1135 1624
1136 v = apic_read(APIC_ICR); 1625 v = apic_read(APIC_ESR);
1137 printk(KERN_DEBUG "... APIC ICR: %08x\n", v); 1626 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1138 v = apic_read(APIC_ICR2); 1627 }
1139 printk(KERN_DEBUG "... APIC ICR2: %08x\n", v); 1628
1629 icr = apic_icr_read();
1630 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
1631 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
1140 1632
1141 v = apic_read(APIC_LVTT); 1633 v = apic_read(APIC_LVTT);
1142 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); 1634 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
@@ -1164,12 +1656,17 @@ void __apicdebuginit print_local_APIC(void * dummy)
1164 printk("\n"); 1656 printk("\n");
1165} 1657}
1166 1658
1167void print_all_local_APICs (void) 1659__apicdebuginit(void) print_all_local_APICs(void)
1168{ 1660{
1169 on_each_cpu(print_local_APIC, NULL, 1); 1661 int cpu;
1662
1663 preempt_disable();
1664 for_each_online_cpu(cpu)
1665 smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1666 preempt_enable();
1170} 1667}
1171 1668
1172void __apicdebuginit print_PIC(void) 1669__apicdebuginit(void) print_PIC(void)
1173{ 1670{
1174 unsigned int v; 1671 unsigned int v;
1175 unsigned long flags; 1672 unsigned long flags;
@@ -1201,19 +1698,34 @@ void __apicdebuginit print_PIC(void)
1201 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); 1698 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1202} 1699}
1203 1700
1204#endif /* 0 */ 1701__apicdebuginit(int) print_all_ICs(void)
1702{
1703 print_PIC();
1704 print_all_local_APICs();
1705 print_IO_APIC();
1706
1707 return 0;
1708}
1709
1710fs_initcall(print_all_ICs);
1711
1712
1713 /* Where, if anywhere, is the i8259 connected in external int mode */
1714static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1205 1715
1206void __init enable_IO_APIC(void) 1716void __init enable_IO_APIC(void)
1207{ 1717{
1208 union IO_APIC_reg_01 reg_01; 1718 union IO_APIC_reg_01 reg_01;
1209 int i8259_apic, i8259_pin; 1719 int i8259_apic, i8259_pin;
1210 int i, apic; 1720 int apic;
1211 unsigned long flags; 1721 unsigned long flags;
1212 1722
1213 for (i = 0; i < PIN_MAP_SIZE; i++) { 1723#ifdef CONFIG_X86_32
1214 irq_2_pin[i].pin = -1; 1724 int i;
1215 irq_2_pin[i].next = 0; 1725 if (!pirqs_enabled)
1216 } 1726 for (i = 0; i < MAX_PIRQS; i++)
1727 pirq_entries[i] = -1;
1728#endif
1217 1729
1218 /* 1730 /*
1219 * The number of IO-APIC IRQ registers (== #pins): 1731 * The number of IO-APIC IRQ registers (== #pins):
@@ -1243,6 +1755,10 @@ void __init enable_IO_APIC(void)
1243 } 1755 }
1244 found_i8259: 1756 found_i8259:
 1245 /* Look to see whether the MP table has reported the ExtINT */ 1757
1758 /* If we could not find the appropriate pin by looking at the ioapic
 1759 * the i8259 probably is not connected to the ioapic, but give the
1760 * mptable a chance anyway.
1761 */
1246 i8259_pin = find_isa_irq_pin(0, mp_ExtINT); 1762 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1247 i8259_apic = find_isa_irq_apic(0, mp_ExtINT); 1763 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1248 /* Trust the MP table if nothing is setup in the hardware */ 1764 /* Trust the MP table if nothing is setup in the hardware */
@@ -1291,7 +1807,7 @@ void disable_IO_APIC(void)
1291 entry.dest_mode = 0; /* Physical */ 1807 entry.dest_mode = 0; /* Physical */
1292 entry.delivery_mode = dest_ExtINT; /* ExtInt */ 1808 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1293 entry.vector = 0; 1809 entry.vector = 0;
1294 entry.dest = GET_APIC_ID(read_apic_id()); 1810 entry.dest = read_apic_id();
1295 1811
1296 /* 1812 /*
1297 * Add it to the IO-APIC irq-routing table: 1813 * Add it to the IO-APIC irq-routing table:
@@ -1302,6 +1818,133 @@ void disable_IO_APIC(void)
1302 disconnect_bsp_APIC(ioapic_i8259.pin != -1); 1818 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1303} 1819}
1304 1820
1821#ifdef CONFIG_X86_32
1822/*
1823 * function to set the IO-APIC physical IDs based on the
1824 * values stored in the MPC table.
1825 *
1826 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1827 */
1828
1829static void __init setup_ioapic_ids_from_mpc(void)
1830{
1831 union IO_APIC_reg_00 reg_00;
1832 physid_mask_t phys_id_present_map;
1833 int apic;
1834 int i;
1835 unsigned char old_id;
1836 unsigned long flags;
1837
1838 if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
1839 return;
1840
1841 /*
1842 * Don't check I/O APIC IDs for xAPIC systems. They have
1843 * no meaning without the serial APIC bus.
1844 */
1845 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1846 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
1847 return;
1848 /*
1849 * This is broken; anything with a real cpu count has to
1850 * circumvent this idiocy regardless.
1851 */
1852 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
1853
1854 /*
1855 * Set the IOAPIC ID to the value stored in the MPC table.
1856 */
1857 for (apic = 0; apic < nr_ioapics; apic++) {
1858
1859 /* Read the register 0 value */
1860 spin_lock_irqsave(&ioapic_lock, flags);
1861 reg_00.raw = io_apic_read(apic, 0);
1862 spin_unlock_irqrestore(&ioapic_lock, flags);
1863
1864 old_id = mp_ioapics[apic].mp_apicid;
1865
1866 if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
1867 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
1868 apic, mp_ioapics[apic].mp_apicid);
1869 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1870 reg_00.bits.ID);
1871 mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
1872 }
1873
1874 /*
1875 * Sanity check, is the ID really free? Every APIC in a
1876 * system must have a unique ID or we get lots of nice
1877 * 'stuck on smp_invalidate_needed IPI wait' messages.
1878 */
1879 if (check_apicid_used(phys_id_present_map,
1880 mp_ioapics[apic].mp_apicid)) {
1881 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
1882 apic, mp_ioapics[apic].mp_apicid);
1883 for (i = 0; i < get_physical_broadcast(); i++)
1884 if (!physid_isset(i, phys_id_present_map))
1885 break;
1886 if (i >= get_physical_broadcast())
1887 panic("Max APIC ID exceeded!\n");
1888 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1889 i);
1890 physid_set(i, phys_id_present_map);
1891 mp_ioapics[apic].mp_apicid = i;
1892 } else {
1893 physid_mask_t tmp;
1894 tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
1895 apic_printk(APIC_VERBOSE, "Setting %d in the "
1896 "phys_id_present_map\n",
1897 mp_ioapics[apic].mp_apicid);
1898 physids_or(phys_id_present_map, phys_id_present_map, tmp);
1899 }
1900
1901
1902 /*
1903 * We need to adjust the IRQ routing table
1904 * if the ID changed.
1905 */
1906 if (old_id != mp_ioapics[apic].mp_apicid)
1907 for (i = 0; i < mp_irq_entries; i++)
1908 if (mp_irqs[i].mp_dstapic == old_id)
1909 mp_irqs[i].mp_dstapic
1910 = mp_ioapics[apic].mp_apicid;
1911
1912 /*
1913 * Read the right value from the MPC table and
1914 * write it into the ID register.
1915 */
1916 apic_printk(APIC_VERBOSE, KERN_INFO
1917 "...changing IO-APIC physical APIC ID to %d ...",
1918 mp_ioapics[apic].mp_apicid);
1919
1920 reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
1921 spin_lock_irqsave(&ioapic_lock, flags);
1922 io_apic_write(apic, 0, reg_00.raw);
1923 spin_unlock_irqrestore(&ioapic_lock, flags);
1924
1925 /*
1926 * Sanity check
1927 */
1928 spin_lock_irqsave(&ioapic_lock, flags);
1929 reg_00.raw = io_apic_read(apic, 0);
1930 spin_unlock_irqrestore(&ioapic_lock, flags);
1931 if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
1932 printk("could not set ID!\n");
1933 else
1934 apic_printk(APIC_VERBOSE, " ok.\n");
1935 }
1936}
1937#endif
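/*
 * A compact model of the duplicate-id fixup in setup_ioapic_ids_from_mpc()
 * above: when the id from the MPC table is already present, the first free
 * id below the broadcast limit is picked instead. Userspace sketch with a
 * plain bitmap; the broadcast limit of 15 is a stand-in value.
 */
#include <stdio.h>

#define BROADCAST 15

int main(void)
{
	int present[BROADCAST] = { 0 };
	int wanted = 2, id, i;

	present[2] = present[3] = 1;		/* ids already claimed */

	id = wanted;
	if (present[wanted]) {			/* conflict: scan for a free id */
		for (i = 0; i < BROADCAST; i++)
			if (!present[i]) {
				id = i;
				break;
			}
	}
	present[id] = 1;
	printf("IO-APIC id %d remapped to %d\n", wanted, id);
	return 0;
}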
1938
1939int no_timer_check __initdata;
1940
1941static int __init notimercheck(char *s)
1942{
1943 no_timer_check = 1;
1944 return 1;
1945}
1946__setup("no_timer_check", notimercheck);
1947
1305/* 1948/*
1306 * There is a nasty bug in some older SMP boards, their mptable lies 1949 * There is a nasty bug in some older SMP boards, their mptable lies
1307 * about the timer IRQ. We do the following to work around the situation: 1950 * about the timer IRQ. We do the following to work around the situation:
@@ -1315,6 +1958,9 @@ static int __init timer_irq_works(void)
1315 unsigned long t1 = jiffies; 1958 unsigned long t1 = jiffies;
1316 unsigned long flags; 1959 unsigned long flags;
1317 1960
1961 if (no_timer_check)
1962 return 1;
1963
1318 local_save_flags(flags); 1964 local_save_flags(flags);
1319 local_irq_enable(); 1965 local_irq_enable();
1320 /* Let ten ticks pass... */ 1966 /* Let ten ticks pass... */
@@ -1375,9 +2021,11 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
1375 return was_pending; 2021 return was_pending;
1376} 2022}
1377 2023
2024#ifdef CONFIG_X86_64
1378static int ioapic_retrigger_irq(unsigned int irq) 2025static int ioapic_retrigger_irq(unsigned int irq)
1379{ 2026{
1380 struct irq_cfg *cfg = &irq_cfg[irq]; 2027
2028 struct irq_cfg *cfg = irq_cfg(irq);
1381 unsigned long flags; 2029 unsigned long flags;
1382 2030
1383 spin_lock_irqsave(&vector_lock, flags); 2031 spin_lock_irqsave(&vector_lock, flags);
@@ -1386,6 +2034,14 @@ static int ioapic_retrigger_irq(unsigned int irq)
1386 2034
1387 return 1; 2035 return 1;
1388} 2036}
2037#else
2038static int ioapic_retrigger_irq(unsigned int irq)
2039{
2040 send_IPI_self(irq_cfg(irq)->vector);
2041
2042 return 1;
2043}
2044#endif
1389 2045
1390/* 2046/*
1391 * Level and edge triggered IO-APIC interrupts need different handling, 2047 * Level and edge triggered IO-APIC interrupts need different handling,
@@ -1397,11 +2053,159 @@ static int ioapic_retrigger_irq(unsigned int irq)
1397 */ 2053 */
1398 2054
1399#ifdef CONFIG_SMP 2055#ifdef CONFIG_SMP
2056
2057#ifdef CONFIG_INTR_REMAP
2058static void ir_irq_migration(struct work_struct *work);
2059
2060static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
2061
2062/*
2063 * Migrate the IO-APIC irq in the presence of intr-remapping.
2064 *
 2065 * For edge triggered, irq migration is a simple atomic update (of vector
 2066 * and cpu destination) of the IRTE, followed by a flush of the hardware cache.
 2067 *
 2068 * For level triggered, we also need to update the io-apic RTE with the new
 2069 * vector information, along with modifying the IRTE with vector and destination.
 2070 * So irq migration for level triggered is a little more complex than edge
 2071 * triggered migration. The good news is that we use the same algorithm for
 2072 * level triggered migration as we have today; the only difference is that
 2073 * we now initiate the irq migration from process context instead of
 2074 * interrupt context.
 2075 *
 2076 * In the future, when we do a directed EOI (combined with cpu EOI broadcast
 2077 * suppression) to the IO-APIC, level triggered irq migration will also be
 2078 * as simple as edge triggered migration and we can do the irq migration
 2079 * with a simple atomic update to the IO-APIC RTE.
2080 */
2081static void migrate_ioapic_irq(int irq, cpumask_t mask)
2082{
2083 struct irq_cfg *cfg;
2084 struct irq_desc *desc;
2085 cpumask_t tmp, cleanup_mask;
2086 struct irte irte;
2087 int modify_ioapic_rte;
2088 unsigned int dest;
2089 unsigned long flags;
2090
2091 cpus_and(tmp, mask, cpu_online_map);
2092 if (cpus_empty(tmp))
2093 return;
2094
2095 if (get_irte(irq, &irte))
2096 return;
2097
2098 if (assign_irq_vector(irq, mask))
2099 return;
2100
2101 cfg = irq_cfg(irq);
2102 cpus_and(tmp, cfg->domain, mask);
2103 dest = cpu_mask_to_apicid(tmp);
2104
2105 desc = irq_to_desc(irq);
2106 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2107 if (modify_ioapic_rte) {
2108 spin_lock_irqsave(&ioapic_lock, flags);
2109 __target_IO_APIC_irq(irq, dest, cfg->vector);
2110 spin_unlock_irqrestore(&ioapic_lock, flags);
2111 }
2112
2113 irte.vector = cfg->vector;
2114 irte.dest_id = IRTE_DEST(dest);
2115
2116 /*
 2117 * Modify the IRTE and flush the interrupt entry cache.
2118 */
2119 modify_irte(irq, &irte);
2120
2121 if (cfg->move_in_progress) {
2122 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2123 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2124 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2125 cfg->move_in_progress = 0;
2126 }
2127
2128 desc->affinity = mask;
2129}
2130
2131static int migrate_irq_remapped_level(int irq)
2132{
2133 int ret = -1;
2134 struct irq_desc *desc = irq_to_desc(irq);
2135
2136 mask_IO_APIC_irq(irq);
2137
2138 if (io_apic_level_ack_pending(irq)) {
2139 /*
2140 * Interrupt in progress. Migrating irq now will change the
2141 * vector information in the IO-APIC RTE and that will confuse
 2142 * the EOI broadcast performed by the cpu.
2143 * So, delay the irq migration to the next instance.
2144 */
2145 schedule_delayed_work(&ir_migration_work, 1);
2146 goto unmask;
2147 }
2148
 2149 /* everything is clear. we have the right of way */
2150 migrate_ioapic_irq(irq, desc->pending_mask);
2151
2152 ret = 0;
2153 desc->status &= ~IRQ_MOVE_PENDING;
2154 cpus_clear(desc->pending_mask);
2155
2156unmask:
2157 unmask_IO_APIC_irq(irq);
2158 return ret;
2159}
2160
2161static void ir_irq_migration(struct work_struct *work)
2162{
2163 unsigned int irq;
2164 struct irq_desc *desc;
2165
2166 for_each_irq_desc(irq, desc) {
2167 if (desc->status & IRQ_MOVE_PENDING) {
2168 unsigned long flags;
2169
2170 spin_lock_irqsave(&desc->lock, flags);
2171 if (!desc->chip->set_affinity ||
2172 !(desc->status & IRQ_MOVE_PENDING)) {
2173 desc->status &= ~IRQ_MOVE_PENDING;
2174 spin_unlock_irqrestore(&desc->lock, flags);
2175 continue;
2176 }
2177
2178 desc->chip->set_affinity(irq, desc->pending_mask);
2179 spin_unlock_irqrestore(&desc->lock, flags);
2180 }
2181 }
2182}
2183
2184/*
2185 * Migrates the IRQ destination in the process context.
2186 */
2187static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
2188{
2189 struct irq_desc *desc = irq_to_desc(irq);
2190
2191 if (desc->status & IRQ_LEVEL) {
2192 desc->status |= IRQ_MOVE_PENDING;
2193 desc->pending_mask = mask;
2194 migrate_irq_remapped_level(irq);
2195 return;
2196 }
2197
2198 migrate_ioapic_irq(irq, mask);
2199}
2200#endif
2201
1400asmlinkage void smp_irq_move_cleanup_interrupt(void) 2202asmlinkage void smp_irq_move_cleanup_interrupt(void)
1401{ 2203{
1402 unsigned vector, me; 2204 unsigned vector, me;
1403 ack_APIC_irq(); 2205 ack_APIC_irq();
2206#ifdef CONFIG_X86_64
1404 exit_idle(); 2207 exit_idle();
2208#endif
1405 irq_enter(); 2209 irq_enter();
1406 2210
1407 me = smp_processor_id(); 2211 me = smp_processor_id();
@@ -1410,11 +2214,12 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
1410 struct irq_desc *desc; 2214 struct irq_desc *desc;
1411 struct irq_cfg *cfg; 2215 struct irq_cfg *cfg;
1412 irq = __get_cpu_var(vector_irq)[vector]; 2216 irq = __get_cpu_var(vector_irq)[vector];
1413 if (irq >= NR_IRQS) 2217
2218 desc = irq_to_desc(irq);
2219 if (!desc)
1414 continue; 2220 continue;
1415 2221
1416 desc = irq_desc + irq; 2222 cfg = irq_cfg(irq);
1417 cfg = irq_cfg + irq;
1418 spin_lock(&desc->lock); 2223 spin_lock(&desc->lock);
1419 if (!cfg->move_cleanup_count) 2224 if (!cfg->move_cleanup_count)
1420 goto unlock; 2225 goto unlock;
@@ -1433,7 +2238,7 @@ unlock:
1433 2238
1434static void irq_complete_move(unsigned int irq) 2239static void irq_complete_move(unsigned int irq)
1435{ 2240{
1436 struct irq_cfg *cfg = irq_cfg + irq; 2241 struct irq_cfg *cfg = irq_cfg(irq);
1437 unsigned vector, me; 2242 unsigned vector, me;
1438 2243
1439 if (likely(!cfg->move_in_progress)) 2244 if (likely(!cfg->move_in_progress))
@@ -1453,6 +2258,17 @@ static void irq_complete_move(unsigned int irq)
1453#else 2258#else
1454static inline void irq_complete_move(unsigned int irq) {} 2259static inline void irq_complete_move(unsigned int irq) {}
1455#endif 2260#endif
2261#ifdef CONFIG_INTR_REMAP
2262static void ack_x2apic_level(unsigned int irq)
2263{
2264 ack_x2APIC_irq();
2265}
2266
2267static void ack_x2apic_edge(unsigned int irq)
2268{
2269 ack_x2APIC_irq();
2270}
2271#endif
1456 2272
1457static void ack_apic_edge(unsigned int irq) 2273static void ack_apic_edge(unsigned int irq)
1458{ 2274{
@@ -1461,19 +2277,50 @@ static void ack_apic_edge(unsigned int irq)
1461 ack_APIC_irq(); 2277 ack_APIC_irq();
1462} 2278}
1463 2279
2280atomic_t irq_mis_count;
2281
1464static void ack_apic_level(unsigned int irq) 2282static void ack_apic_level(unsigned int irq)
1465{ 2283{
2284#ifdef CONFIG_X86_32
2285 unsigned long v;
2286 int i;
2287#endif
1466 int do_unmask_irq = 0; 2288 int do_unmask_irq = 0;
1467 2289
1468 irq_complete_move(irq); 2290 irq_complete_move(irq);
1469#ifdef CONFIG_GENERIC_PENDING_IRQ 2291#ifdef CONFIG_GENERIC_PENDING_IRQ
1470 /* If we are moving the irq we need to mask it */ 2292 /* If we are moving the irq we need to mask it */
1471 if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) { 2293 if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
1472 do_unmask_irq = 1; 2294 do_unmask_irq = 1;
1473 mask_IO_APIC_irq(irq); 2295 mask_IO_APIC_irq(irq);
1474 } 2296 }
1475#endif 2297#endif
1476 2298
2299#ifdef CONFIG_X86_32
2300 /*
2301 * It appears there is an erratum which affects at least version 0x11
2302 * of I/O APIC (that's the 82093AA and cores integrated into various
2303 * chipsets). Under certain conditions a level-triggered interrupt is
2304 * erroneously delivered as edge-triggered one but the respective IRR
2305 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2306 * message but it will never arrive and further interrupts are blocked
2307 * from the source. The exact reason is so far unknown, but the
2308 * phenomenon was observed when two consecutive interrupt requests
2309 * from a given source get delivered to the same CPU and the source is
2310 * temporarily disabled in between.
2311 *
2312 * A workaround is to simulate an EOI message manually. We achieve it
2313 * by setting the trigger mode to edge and then to level when the edge
2314 * trigger mode gets detected in the TMR of a local APIC for a
2315 * level-triggered interrupt. We mask the source for the time of the
2316 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2317 * The idea is from Manfred Spraul. --macro
2318 */
2319 i = irq_cfg(irq)->vector;
2320
2321 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2322#endif
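/*
 * The offset arithmetic above packs into one expression what this runnable
 * sketch spells out: the 256-bit TMR is exposed as eight 32-bit registers
 * spaced 0x10 apart, so vector i lives at byte offset 0x10 * (i / 32),
 * bit (i % 32). (i & ~0x1f) is 32 * (i / 32); shifting right by one turns
 * that into 0x10 * (i / 32). Userspace illustration only.
 */
#include <assert.h>

int main(void)
{
	unsigned int i;

	for (i = 0; i < 256; i++) {
		unsigned int offset = (i & ~0x1f) >> 1;	/* as in ack_apic_level() */
		unsigned int bit = i & 0x1f;		/* as in the TMR bit test */

		assert(offset == 0x10 * (i / 32));
		assert(bit == i % 32);
	}
	return 0;
}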
2323
1477 /* 2324 /*
1478 * We must acknowledge the irq before we move it or the acknowledge will 2325 * We must acknowledge the irq before we move it or the acknowledge will
1479 * not propagate properly. 2326 * not propagate properly.
@@ -1512,24 +2359,51 @@ static void ack_apic_level(unsigned int irq)
1512 move_masked_irq(irq); 2359 move_masked_irq(irq);
1513 unmask_IO_APIC_irq(irq); 2360 unmask_IO_APIC_irq(irq);
1514 } 2361 }
2362
2363#ifdef CONFIG_X86_32
2364 if (!(v & (1 << (i & 0x1f)))) {
2365 atomic_inc(&irq_mis_count);
2366 spin_lock(&ioapic_lock);
2367 __mask_and_edge_IO_APIC_irq(irq);
2368 __unmask_and_level_IO_APIC_irq(irq);
2369 spin_unlock(&ioapic_lock);
2370 }
2371#endif
1515} 2372}
1516 2373
1517static struct irq_chip ioapic_chip __read_mostly = { 2374static struct irq_chip ioapic_chip __read_mostly = {
1518 .name = "IO-APIC", 2375 .name = "IO-APIC",
1519 .startup = startup_ioapic_irq, 2376 .startup = startup_ioapic_irq,
1520 .mask = mask_IO_APIC_irq, 2377 .mask = mask_IO_APIC_irq,
1521 .unmask = unmask_IO_APIC_irq, 2378 .unmask = unmask_IO_APIC_irq,
1522 .ack = ack_apic_edge, 2379 .ack = ack_apic_edge,
1523 .eoi = ack_apic_level, 2380 .eoi = ack_apic_level,
2381#ifdef CONFIG_SMP
2382 .set_affinity = set_ioapic_affinity_irq,
2383#endif
2384 .retrigger = ioapic_retrigger_irq,
2385};
2386
2387#ifdef CONFIG_INTR_REMAP
2388static struct irq_chip ir_ioapic_chip __read_mostly = {
2389 .name = "IR-IO-APIC",
2390 .startup = startup_ioapic_irq,
2391 .mask = mask_IO_APIC_irq,
2392 .unmask = unmask_IO_APIC_irq,
2393 .ack = ack_x2apic_edge,
2394 .eoi = ack_x2apic_level,
1524#ifdef CONFIG_SMP 2395#ifdef CONFIG_SMP
1525 .set_affinity = set_ioapic_affinity_irq, 2396 .set_affinity = set_ir_ioapic_affinity_irq,
1526#endif 2397#endif
1527 .retrigger = ioapic_retrigger_irq, 2398 .retrigger = ioapic_retrigger_irq,
1528}; 2399};
2400#endif
1529 2401
1530static inline void init_IO_APIC_traps(void) 2402static inline void init_IO_APIC_traps(void)
1531{ 2403{
1532 int irq; 2404 int irq;
2405 struct irq_desc *desc;
2406 struct irq_cfg *cfg;
1533 2407
1534 /* 2408 /*
1535 * NOTE! The local APIC isn't very good at handling 2409 * NOTE! The local APIC isn't very good at handling
@@ -1542,8 +2416,8 @@ static inline void init_IO_APIC_traps(void)
1542 * Also, we've got to be careful not to trash gate 2416 * Also, we've got to be careful not to trash gate
1543 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2417 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1544 */ 2418 */
1545 for (irq = 0; irq < NR_IRQS ; irq++) { 2419 for_each_irq_cfg(irq, cfg) {
1546 if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) { 2420 if (IO_APIC_IRQ(irq) && !cfg->vector) {
1547 /* 2421 /*
1548 * Hmm.. We don't have an entry for this, 2422 * Hmm.. We don't have an entry for this,
1549 * so default to an old-fashioned 8259 2423 * so default to an old-fashioned 8259
@@ -1551,27 +2425,33 @@ static inline void init_IO_APIC_traps(void)
1551 */ 2425 */
1552 if (irq < 16) 2426 if (irq < 16)
1553 make_8259A_irq(irq); 2427 make_8259A_irq(irq);
1554 else 2428 else {
2429 desc = irq_to_desc(irq);
1555 /* Strange. Oh, well.. */ 2430 /* Strange. Oh, well.. */
1556 irq_desc[irq].chip = &no_irq_chip; 2431 desc->chip = &no_irq_chip;
2432 }
1557 } 2433 }
1558 } 2434 }
1559} 2435}
1560 2436
1561static void unmask_lapic_irq(unsigned int irq) 2437/*
2438 * The local APIC irq-chip implementation:
2439 */
2440
2441static void mask_lapic_irq(unsigned int irq)
1562{ 2442{
1563 unsigned long v; 2443 unsigned long v;
1564 2444
1565 v = apic_read(APIC_LVT0); 2445 v = apic_read(APIC_LVT0);
1566 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2446 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1567} 2447}
1568 2448
1569static void mask_lapic_irq(unsigned int irq) 2449static void unmask_lapic_irq(unsigned int irq)
1570{ 2450{
1571 unsigned long v; 2451 unsigned long v;
1572 2452
1573 v = apic_read(APIC_LVT0); 2453 v = apic_read(APIC_LVT0);
1574 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 2454 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
1575} 2455}
1576 2456
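/*
 * A tiny sketch of the LVT0 read-modify-write pair above: masking ORs in
 * APIC_LVT_MASKED (bit 16), unmasking ANDs it back out, and the two
 * operations round-trip. The starting value is hypothetical.
 */
#include <assert.h>

#define APIC_LVT_MASKED (1UL << 16)

int main(void)
{
	unsigned long v = 0x00000700;			/* hypothetical LVT0 */
	unsigned long masked = v | APIC_LVT_MASKED;	/* mask_lapic_irq() */
	unsigned long unmasked = masked & ~APIC_LVT_MASKED; /* unmask_lapic_irq() */

	assert(masked & APIC_LVT_MASKED);
	assert(unmasked == v);				/* clean round-trip */
	return 0;
}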
1577static void ack_lapic_irq (unsigned int irq) 2457static void ack_lapic_irq (unsigned int irq)
@@ -1588,7 +2468,10 @@ static struct irq_chip lapic_chip __read_mostly = {
1588 2468
1589static void lapic_register_intr(int irq) 2469static void lapic_register_intr(int irq)
1590{ 2470{
1591 irq_desc[irq].status &= ~IRQ_LEVEL; 2471 struct irq_desc *desc;
2472
2473 desc = irq_to_desc(irq);
2474 desc->status &= ~IRQ_LEVEL;
1592 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2475 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
1593 "edge"); 2476 "edge");
1594} 2477}
@@ -1596,19 +2479,19 @@ static void lapic_register_intr(int irq)
1596static void __init setup_nmi(void) 2479static void __init setup_nmi(void)
1597{ 2480{
1598 /* 2481 /*
1599 * Dirty trick to enable the NMI watchdog ... 2482 * Dirty trick to enable the NMI watchdog ...
1600 * We put the 8259A master into AEOI mode and 2483 * We put the 8259A master into AEOI mode and
1601 * unmask on all local APICs LVT0 as NMI. 2484 * unmask on all local APICs LVT0 as NMI.
1602 * 2485 *
1603 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') 2486 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
1604 * is from Maciej W. Rozycki - so we do not have to EOI from 2487 * is from Maciej W. Rozycki - so we do not have to EOI from
1605 * the NMI handler or the timer interrupt. 2488 * the NMI handler or the timer interrupt.
1606 */ 2489 */
1607 printk(KERN_INFO "activating NMI Watchdog ..."); 2490 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
1608 2491
1609 enable_NMI_through_LVT0(); 2492 enable_NMI_through_LVT0();
1610 2493
1611 printk(" done.\n"); 2494 apic_printk(APIC_VERBOSE, " done.\n");
1612} 2495}
1613 2496
1614/* 2497/*
@@ -1625,12 +2508,17 @@ static inline void __init unlock_ExtINT_logic(void)
1625 unsigned char save_control, save_freq_select; 2508 unsigned char save_control, save_freq_select;
1626 2509
1627 pin = find_isa_irq_pin(8, mp_INT); 2510 pin = find_isa_irq_pin(8, mp_INT);
2511 if (pin == -1) {
2512 WARN_ON_ONCE(1);
2513 return;
2514 }
1628 apic = find_isa_irq_apic(8, mp_INT); 2515 apic = find_isa_irq_apic(8, mp_INT);
1629 if (pin == -1) 2516 if (apic == -1) {
2517 WARN_ON_ONCE(1);
1630 return; 2518 return;
2519 }
1631 2520
1632 entry0 = ioapic_read_entry(apic, pin); 2521 entry0 = ioapic_read_entry(apic, pin);
1633
1634 clear_IO_APIC_pin(apic, pin); 2522 clear_IO_APIC_pin(apic, pin);
1635 2523
1636 memset(&entry1, 0, sizeof(entry1)); 2524 memset(&entry1, 0, sizeof(entry1));
@@ -1665,23 +2553,38 @@ static inline void __init unlock_ExtINT_logic(void)
1665 ioapic_write_entry(apic, pin, entry0); 2553 ioapic_write_entry(apic, pin, entry0);
1666} 2554}
1667 2555
2556static int disable_timer_pin_1 __initdata;
2557 /* Actually the following is obsolete, but keep it for paranoid reasons -AK */
2558static int __init disable_timer_pin_setup(char *arg)
2559{
2560 disable_timer_pin_1 = 1;
2561 return 0;
2562}
2563early_param("disable_timer_pin_1", disable_timer_pin_setup);
2564
2565int timer_through_8259 __initdata;
2566
1668/* 2567/*
1669 * This code may look a bit paranoid, but it's supposed to cooperate with 2568 * This code may look a bit paranoid, but it's supposed to cooperate with
1670 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ 2569 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
1671 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast 2570 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
1672 * fanatically on his truly buggy board. 2571 * fanatically on his truly buggy board.
1673 * 2572 *
1674 * FIXME: really need to revamp this for modern platforms only. 2573 * FIXME: really need to revamp this for all platforms.
1675 */ 2574 */
1676static inline void __init check_timer(void) 2575static inline void __init check_timer(void)
1677{ 2576{
1678 struct irq_cfg *cfg = irq_cfg + 0; 2577 struct irq_cfg *cfg = irq_cfg(0);
1679 int apic1, pin1, apic2, pin2; 2578 int apic1, pin1, apic2, pin2;
1680 unsigned long flags; 2579 unsigned long flags;
2580 unsigned int ver;
1681 int no_pin1 = 0; 2581 int no_pin1 = 0;
1682 2582
1683 local_irq_save(flags); 2583 local_irq_save(flags);
1684 2584
2585 ver = apic_read(APIC_LVR);
2586 ver = GET_APIC_VERSION(ver);
2587
1685 /* 2588 /*
1686 * get/set the timer IRQ vector: 2589 * get/set the timer IRQ vector:
1687 */ 2590 */
@@ -1690,10 +2593,18 @@ static inline void __init check_timer(void)
1690 2593
1691 /* 2594 /*
1692 * As IRQ0 is to be enabled in the 8259A, the virtual 2595 * As IRQ0 is to be enabled in the 8259A, the virtual
1693 * wire has to be disabled in the local APIC. 2596 * wire has to be disabled in the local APIC. Also
2597 * timer interrupts need to be acknowledged manually in
2598 * the 8259A for the i82489DX when using the NMI
2599 * watchdog as that APIC treats NMIs as level-triggered.
2600 * The AEOI mode will finish them in the 8259A
2601 * automatically.
1694 */ 2602 */
1695 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2603 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
1696 init_8259A(1); 2604 init_8259A(1);
2605#ifdef CONFIG_X86_32
2606 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2607#endif
1697 2608
1698 pin1 = find_isa_irq_pin(0, mp_INT); 2609 pin1 = find_isa_irq_pin(0, mp_INT);
1699 apic1 = find_isa_irq_apic(0, mp_INT); 2610 apic1 = find_isa_irq_apic(0, mp_INT);
@@ -1712,6 +2623,10 @@ static inline void __init check_timer(void)
1712 * 8259A. 2623 * 8259A.
1713 */ 2624 */
1714 if (pin1 == -1) { 2625 if (pin1 == -1) {
2626#ifdef CONFIG_INTR_REMAP
2627 if (intr_remapping_enabled)
2628 panic("BIOS bug: timer not connected to IO-APIC");
2629#endif
1715 pin1 = pin2; 2630 pin1 = pin2;
1716 apic1 = apic2; 2631 apic1 = apic2;
1717 no_pin1 = 1; 2632 no_pin1 = 1;
@@ -1729,7 +2644,7 @@ static inline void __init check_timer(void)
1729 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2644 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
1730 } 2645 }
1731 unmask_IO_APIC_irq(0); 2646 unmask_IO_APIC_irq(0);
1732 if (!no_timer_check && timer_irq_works()) { 2647 if (timer_irq_works()) {
1733 if (nmi_watchdog == NMI_IO_APIC) { 2648 if (nmi_watchdog == NMI_IO_APIC) {
1734 setup_nmi(); 2649 setup_nmi();
1735 enable_8259A_irq(0); 2650 enable_8259A_irq(0);
@@ -1738,6 +2653,10 @@ static inline void __init check_timer(void)
1738 clear_IO_APIC_pin(0, pin1); 2653 clear_IO_APIC_pin(0, pin1);
1739 goto out; 2654 goto out;
1740 } 2655 }
2656#ifdef CONFIG_INTR_REMAP
2657 if (intr_remapping_enabled)
2658 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2659#endif
1741 clear_IO_APIC_pin(apic1, pin1); 2660 clear_IO_APIC_pin(apic1, pin1);
1742 if (!no_pin1) 2661 if (!no_pin1)
1743 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " 2662 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
@@ -1777,6 +2696,9 @@ static inline void __init check_timer(void)
1777 "through the IO-APIC - disabling NMI Watchdog!\n"); 2696 "through the IO-APIC - disabling NMI Watchdog!\n");
1778 nmi_watchdog = NMI_NONE; 2697 nmi_watchdog = NMI_NONE;
1779 } 2698 }
2699#ifdef CONFIG_X86_32
2700 timer_ack = 0;
2701#endif
1780 2702
1781 apic_printk(APIC_QUIET, KERN_INFO 2703 apic_printk(APIC_QUIET, KERN_INFO
1782 "...trying to set up timer as Virtual Wire IRQ...\n"); 2704 "...trying to set up timer as Virtual Wire IRQ...\n");
@@ -1813,13 +2735,6 @@ out:
1813 local_irq_restore(flags); 2735 local_irq_restore(flags);
1814} 2736}
1815 2737
1816static int __init notimercheck(char *s)
1817{
1818 no_timer_check = 1;
1819 return 1;
1820}
1821__setup("no_timer_check", notimercheck);
1822
1823/* 2738/*
1824 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available 2739 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
1825 * to devices. However there may be an I/O APIC pin available for 2740 * to devices. However there may be an I/O APIC pin available for
@@ -1837,27 +2752,49 @@ __setup("no_timer_check", notimercheck);
1837 * the I/O APIC in all cases now. No actual device should request 2752 * the I/O APIC in all cases now. No actual device should request
1838 * it anyway. --macro 2753 * it anyway. --macro
1839 */ 2754 */
1840#define PIC_IRQS (1<<2) 2755#define PIC_IRQS (1 << PIC_CASCADE_IR)
1841 2756
1842void __init setup_IO_APIC(void) 2757void __init setup_IO_APIC(void)
1843{ 2758{
1844 2759
2760#ifdef CONFIG_X86_32
2761 enable_IO_APIC();
2762#else
1845 /* 2763 /*
1846 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 2764 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
1847 */ 2765 */
2766#endif
1848 2767
1849 io_apic_irqs = ~PIC_IRQS; 2768 io_apic_irqs = ~PIC_IRQS;
1850 2769
1851 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2770 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
1852 2771 /*
2772 * Set up IO-APIC IRQ routing.
2773 */
2774#ifdef CONFIG_X86_32
2775 if (!acpi_ioapic)
2776 setup_ioapic_ids_from_mpc();
2777#endif
1853 sync_Arb_IDs(); 2778 sync_Arb_IDs();
1854 setup_IO_APIC_irqs(); 2779 setup_IO_APIC_irqs();
1855 init_IO_APIC_traps(); 2780 init_IO_APIC_traps();
1856 check_timer(); 2781 check_timer();
1857 if (!acpi_ioapic)
1858 print_IO_APIC();
1859} 2782}
1860 2783
2784/*
 2785 * Called after all the initialization is done. If we didn't find any
 2786 * APIC bugs then we can allow the modify fast path.
2787 */
2788
2789static int __init io_apic_bug_finalize(void)
2790{
2791 if (sis_apic_bug == -1)
2792 sis_apic_bug = 0;
2793 return 0;
2794}
2795
2796late_initcall(io_apic_bug_finalize);
2797
1861struct sysfs_ioapic_data { 2798struct sysfs_ioapic_data {
1862 struct sys_device dev; 2799 struct sys_device dev;
1863 struct IO_APIC_route_entry entry[0]; 2800 struct IO_APIC_route_entry entry[0];
@@ -1945,38 +2882,60 @@ device_initcall(ioapic_init_sysfs);
1945/* 2882/*
1946 * Dynamic irq allocate and deallocation 2883 * Dynamic irq allocate and deallocation
1947 */ 2884 */
1948int create_irq(void) 2885unsigned int create_irq_nr(unsigned int irq_want)
1949{ 2886{
1950 /* Allocate an unused irq */ 2887 /* Allocate an unused irq */
1951 int irq; 2888 unsigned int irq;
1952 int new; 2889 unsigned int new;
1953 unsigned long flags; 2890 unsigned long flags;
2891 struct irq_cfg *cfg_new;
1954 2892
 1955 irq = -ENOSPC; 2893 irq_want = min_t(unsigned int, irq_want, nr_irqs - 1);
2894
2895 irq = 0;
1956 spin_lock_irqsave(&vector_lock, flags); 2896 spin_lock_irqsave(&vector_lock, flags);
1957 for (new = (NR_IRQS - 1); new >= 0; new--) { 2897 for (new = irq_want; new > 0; new--) {
1958 if (platform_legacy_irq(new)) 2898 if (platform_legacy_irq(new))
1959 continue; 2899 continue;
1960 if (irq_cfg[new].vector != 0) 2900 cfg_new = irq_cfg(new);
2901 if (cfg_new && cfg_new->vector != 0)
1961 continue; 2902 continue;
2903 /* check if need to create one */
2904 if (!cfg_new)
2905 cfg_new = irq_cfg_alloc(new);
1962 if (__assign_irq_vector(new, TARGET_CPUS) == 0) 2906 if (__assign_irq_vector(new, TARGET_CPUS) == 0)
1963 irq = new; 2907 irq = new;
1964 break; 2908 break;
1965 } 2909 }
1966 spin_unlock_irqrestore(&vector_lock, flags); 2910 spin_unlock_irqrestore(&vector_lock, flags);
1967 2911
1968 if (irq >= 0) { 2912 if (irq > 0) {
1969 dynamic_irq_init(irq); 2913 dynamic_irq_init(irq);
1970 } 2914 }
1971 return irq; 2915 return irq;
1972} 2916}
1973 2917
2918int create_irq(void)
2919{
2920 int irq;
2921
2922 irq = create_irq_nr(nr_irqs - 1);
2923
2924 if (irq == 0)
2925 irq = -1;
2926
2927 return irq;
2928}
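/*
 * A userspace model of the downward scan in create_irq_nr() above: start
 * at the caller's hint and walk toward 0, skipping legacy irqs and slots
 * already holding a vector; 0 signals failure. The bitmap stands in for
 * the per-irq cfg state and is illustrative only.
 */
#include <stdio.h>

#define NR 64

static int in_use[NR];				/* stand-in for cfg->vector != 0 */

static unsigned int alloc_irq_nr(unsigned int want)
{
	unsigned int n;

	for (n = want; n > 0; n--) {
		if (n < 16)			/* skip legacy/platform irqs */
			continue;
		if (in_use[n])
			continue;
		in_use[n] = 1;
		return n;			/* success */
	}
	return 0;				/* 0 means no irq available */
}

int main(void)
{
	printf("first:  irq %u\n", alloc_irq_nr(NR - 1));
	printf("second: irq %u\n", alloc_irq_nr(NR - 1));
	return 0;
}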
2929
1974void destroy_irq(unsigned int irq) 2930void destroy_irq(unsigned int irq)
1975{ 2931{
1976 unsigned long flags; 2932 unsigned long flags;
1977 2933
1978 dynamic_irq_cleanup(irq); 2934 dynamic_irq_cleanup(irq);
1979 2935
2936#ifdef CONFIG_INTR_REMAP
2937 free_irte(irq);
2938#endif
1980 spin_lock_irqsave(&vector_lock, flags); 2939 spin_lock_irqsave(&vector_lock, flags);
1981 __clear_irq_vector(irq); 2940 __clear_irq_vector(irq);
1982 spin_unlock_irqrestore(&vector_lock, flags); 2941 spin_unlock_irqrestore(&vector_lock, flags);
@@ -1988,17 +2947,49 @@ void destroy_irq(unsigned int irq)
1988#ifdef CONFIG_PCI_MSI 2947#ifdef CONFIG_PCI_MSI
1989static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) 2948static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
1990{ 2949{
1991 struct irq_cfg *cfg = irq_cfg + irq; 2950 struct irq_cfg *cfg;
1992 int err; 2951 int err;
1993 unsigned dest; 2952 unsigned dest;
1994 cpumask_t tmp; 2953 cpumask_t tmp;
1995 2954
1996 tmp = TARGET_CPUS; 2955 tmp = TARGET_CPUS;
1997 err = assign_irq_vector(irq, tmp); 2956 err = assign_irq_vector(irq, tmp);
1998 if (!err) { 2957 if (err)
1999 cpus_and(tmp, cfg->domain, tmp); 2958 return err;
2000 dest = cpu_mask_to_apicid(tmp); 2959
2960 cfg = irq_cfg(irq);
2961 cpus_and(tmp, cfg->domain, tmp);
2962 dest = cpu_mask_to_apicid(tmp);
2963
2964#ifdef CONFIG_INTR_REMAP
2965 if (irq_remapped(irq)) {
2966 struct irte irte;
2967 int ir_index;
2968 u16 sub_handle;
2969
2970 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
2971 BUG_ON(ir_index == -1);
2972
 2973 memset(&irte, 0, sizeof(irte));
2001 2974
2975 irte.present = 1;
2976 irte.dst_mode = INT_DEST_MODE;
2977 irte.trigger_mode = 0; /* edge */
2978 irte.dlvry_mode = INT_DELIVERY_MODE;
2979 irte.vector = cfg->vector;
2980 irte.dest_id = IRTE_DEST(dest);
2981
2982 modify_irte(irq, &irte);
2983
2984 msg->address_hi = MSI_ADDR_BASE_HI;
2985 msg->data = sub_handle;
2986 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
2987 MSI_ADDR_IR_SHV |
2988 MSI_ADDR_IR_INDEX1(ir_index) |
2989 MSI_ADDR_IR_INDEX2(ir_index);
2990 } else
2991#endif
2992 {
2002 msg->address_hi = MSI_ADDR_BASE_HI; 2993 msg->address_hi = MSI_ADDR_BASE_HI;
2003 msg->address_lo = 2994 msg->address_lo =
2004 MSI_ADDR_BASE_LO | 2995 MSI_ADDR_BASE_LO |
@@ -2024,10 +3015,11 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
2024#ifdef CONFIG_SMP 3015#ifdef CONFIG_SMP
2025static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) 3016static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2026{ 3017{
2027 struct irq_cfg *cfg = irq_cfg + irq; 3018 struct irq_cfg *cfg;
2028 struct msi_msg msg; 3019 struct msi_msg msg;
2029 unsigned int dest; 3020 unsigned int dest;
2030 cpumask_t tmp; 3021 cpumask_t tmp;
3022 struct irq_desc *desc;
2031 3023
2032 cpus_and(tmp, mask, cpu_online_map); 3024 cpus_and(tmp, mask, cpu_online_map);
2033 if (cpus_empty(tmp)) 3025 if (cpus_empty(tmp))
@@ -2036,6 +3028,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2036 if (assign_irq_vector(irq, mask)) 3028 if (assign_irq_vector(irq, mask))
2037 return; 3029 return;
2038 3030
3031 cfg = irq_cfg(irq);
2039 cpus_and(tmp, cfg->domain, mask); 3032 cpus_and(tmp, cfg->domain, mask);
2040 dest = cpu_mask_to_apicid(tmp); 3033 dest = cpu_mask_to_apicid(tmp);
2041 3034
@@ -2047,8 +3040,61 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2047 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3040 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2048 3041
2049 write_msi_msg(irq, &msg); 3042 write_msi_msg(irq, &msg);
2050 irq_desc[irq].affinity = mask; 3043 desc = irq_to_desc(irq);
3044 desc->affinity = mask;
3045}
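/*
 * A sketch of the MSI message rewrite done above: clear the old vector and
 * destination-id fields, then OR in the new ones. The masks and shift used
 * below are illustrative stand-ins for the MSI_DATA_ and MSI_ADDR_ macros,
 * not the real constants.
 */
#include <assert.h>

#define DATA_VECTOR_MASK	0x000000ffu	/* hypothetical */
#define ADDR_DEST_ID_MASK	0x000ff000u	/* hypothetical */

int main(void)
{
	unsigned int data = 0x00004031, addr = 0x000fe00c;	/* old message */
	unsigned int vector = 0x41, dest = 0x02;		/* new target */

	data &= ~DATA_VECTOR_MASK;
	data |= vector;				/* new vector in the low byte */
	addr &= ~ADDR_DEST_ID_MASK;
	addr |= dest << 12;			/* new destination id */

	assert((data & DATA_VECTOR_MASK) == vector);
	assert(((addr & ADDR_DEST_ID_MASK) >> 12) == dest);
	return 0;
}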
3046
3047#ifdef CONFIG_INTR_REMAP
3048/*
3049 * Migrate the MSI irq to another cpumask. This migration is
3050 * done in the process context using interrupt-remapping hardware.
3051 */
3052static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3053{
3054 struct irq_cfg *cfg;
3055 unsigned int dest;
3056 cpumask_t tmp, cleanup_mask;
3057 struct irte irte;
3058 struct irq_desc *desc;
3059
3060 cpus_and(tmp, mask, cpu_online_map);
3061 if (cpus_empty(tmp))
3062 return;
3063
3064 if (get_irte(irq, &irte))
3065 return;
3066
3067 if (assign_irq_vector(irq, mask))
3068 return;
3069
3070 cfg = irq_cfg(irq);
3071 cpus_and(tmp, cfg->domain, mask);
3072 dest = cpu_mask_to_apicid(tmp);
3073
3074 irte.vector = cfg->vector;
3075 irte.dest_id = IRTE_DEST(dest);
3076
3077 /*
3078 * atomically update the IRTE with the new destination and vector.
3079 */
3080 modify_irte(irq, &irte);
3081
3082 /*
3083 * After this point, all the interrupts will start arriving
3084 * at the new destination. So, time to cleanup the previous
3085 * vector allocation.
3086 */
3087 if (cfg->move_in_progress) {
3088 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
3089 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
3090 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
3091 cfg->move_in_progress = 0;
3092 }
3093
3094 desc = irq_to_desc(irq);
3095 desc->affinity = mask;
2051} 3096}
3097#endif
2052#endif /* CONFIG_SMP */ 3098#endif /* CONFIG_SMP */
2053 3099
2054/* 3100/*
@@ -2066,26 +3112,179 @@ static struct irq_chip msi_chip = {
2066 .retrigger = ioapic_retrigger_irq, 3112 .retrigger = ioapic_retrigger_irq,
2067}; 3113};
2068 3114
2069int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) 3115#ifdef CONFIG_INTR_REMAP
3116static struct irq_chip msi_ir_chip = {
3117 .name = "IR-PCI-MSI",
3118 .unmask = unmask_msi_irq,
3119 .mask = mask_msi_irq,
3120 .ack = ack_x2apic_edge,
3121#ifdef CONFIG_SMP
3122 .set_affinity = ir_set_msi_irq_affinity,
3123#endif
3124 .retrigger = ioapic_retrigger_irq,
3125};
3126
3127/*
3128 * Map the PCI dev to the corresponding remapping hardware unit
3129 * and allocate 'nvec' consecutive interrupt-remapping table entries
3130 * in it.
3131 */
3132static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3133{
3134 struct intel_iommu *iommu;
3135 int index;
3136
3137 iommu = map_dev_to_ir(dev);
3138 if (!iommu) {
3139 printk(KERN_ERR
3140 "Unable to map PCI %s to iommu\n", pci_name(dev));
3141 return -ENOENT;
3142 }
3143
3144 index = alloc_irte(iommu, irq, nvec);
3145 if (index < 0) {
3146 printk(KERN_ERR
3147 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3148 pci_name(dev));
3149 return -ENOSPC;
3150 }
3151 return index;
3152}
3153#endif
3154
3155static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
2070{ 3156{
3157 int ret;
2071 struct msi_msg msg; 3158 struct msi_msg msg;
2072 int irq, ret;
2073 irq = create_irq();
2074 if (irq < 0)
2075 return irq;
2076 3159
2077 ret = msi_compose_msg(dev, irq, &msg); 3160 ret = msi_compose_msg(dev, irq, &msg);
3161 if (ret < 0)
3162 return ret;
3163
3164 set_irq_msi(irq, desc);
3165 write_msi_msg(irq, &msg);
3166
3167#ifdef CONFIG_INTR_REMAP
3168 if (irq_remapped(irq)) {
3169 struct irq_desc *desc = irq_to_desc(irq);
3170 /*
3171 * irq migration in process context
3172 */
3173 desc->status |= IRQ_MOVE_PCNTXT;
3174 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3175 } else
3176#endif
3177 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3178
3179 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
3180
3181 return 0;
3182}
3183
3184static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
3185{
3186 unsigned int irq;
3187
3188 irq = dev->bus->number;
3189 irq <<= 8;
3190 irq |= dev->devfn;
3191 irq <<= 12;
3192
3193 return irq;
3194}
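/*
 * A runnable sketch of the irq hint layout built above: bus number in bits
 * 20-27, devfn in bits 12-19, and the low 12 bits left clear. The bus and
 * devfn values are made up for illustration.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int bus = 0x3f, devfn = 0xa8;	/* hypothetical PCI address */
	unsigned int irq;

	irq = bus;
	irq <<= 8;
	irq |= devfn;
	irq <<= 12;				/* same steps as the code above */

	assert(((irq >> 20) & 0xff) == bus);	/* bus recoverable */
	assert(((irq >> 12) & 0xff) == devfn);	/* devfn recoverable */
	printf("irq hint = 0x%08x\n", irq);
	return 0;
}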
3195
3196int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
3197{
3198 unsigned int irq;
3199 int ret;
3200 unsigned int irq_want;
3201
3202 irq_want = build_irq_for_pci_dev(dev) + 0x100;
3203
3204 irq = create_irq_nr(irq_want);
3205 if (irq == 0)
3206 return -1;
3207
3208#ifdef CONFIG_INTR_REMAP
3209 if (!intr_remapping_enabled)
3210 goto no_ir;
3211
3212 ret = msi_alloc_irte(dev, irq, 1);
3213 if (ret < 0)
3214 goto error;
3215no_ir:
3216#endif
3217 ret = setup_msi_irq(dev, desc, irq);
2078 if (ret < 0) { 3218 if (ret < 0) {
2079 destroy_irq(irq); 3219 destroy_irq(irq);
2080 return ret; 3220 return ret;
2081 } 3221 }
3222 return 0;
2082 3223
2083 set_irq_msi(irq, desc); 3224#ifdef CONFIG_INTR_REMAP
2084 write_msi_msg(irq, &msg); 3225error:
3226 destroy_irq(irq);
3227 return ret;
3228#endif
3229}
3230
3231int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3232{
3233 unsigned int irq;
3234 int ret, sub_handle;
3235 struct msi_desc *desc;
3236 unsigned int irq_want;
2085 3237
2086 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); 3238#ifdef CONFIG_INTR_REMAP
 3239 struct intel_iommu *iommu = NULL;
3240 int index = 0;
3241#endif
2087 3242
3243 irq_want = build_irq_for_pci_dev(dev) + 0x100;
3244 sub_handle = 0;
3245 list_for_each_entry(desc, &dev->msi_list, list) {
3246 irq = create_irq_nr(irq_want--);
3247 if (irq == 0)
3248 return -1;
3249#ifdef CONFIG_INTR_REMAP
3250 if (!intr_remapping_enabled)
3251 goto no_ir;
3252
3253 if (!sub_handle) {
3254 /*
3255 * allocate the consecutive block of IRTEs
3256 * for 'nvec'
3257 */
3258 index = msi_alloc_irte(dev, irq, nvec);
3259 if (index < 0) {
3260 ret = index;
3261 goto error;
3262 }
3263 } else {
3264 iommu = map_dev_to_ir(dev);
3265 if (!iommu) {
3266 ret = -ENOENT;
3267 goto error;
3268 }
3269 /*
3270 * set up the mapping between the irq and the IRTE
3271 * base index, with the sub_handle pointing to the
3272 * appropriate interrupt remap table entry.
3273 */
3274 set_irte_irq(irq, iommu, index, sub_handle);
3275 }
3276no_ir:
3277#endif
3278 ret = setup_msi_irq(dev, desc, irq);
3279 if (ret < 0)
3280 goto error;
3281 sub_handle++;
3282 }
2088 return 0; 3283 return 0;
3284
3285error:
3286 destroy_irq(irq);
3287 return ret;
2089} 3288}
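
For multi-vector MSI, the loop above allocates one consecutive block of IRTEs
on the first descriptor and then points every later irq at index + sub_handle
within that block. A toy userspace model of that bookkeeping (the table and
helper here are hypothetical stand-ins, not the kernel API):

#include <stdio.h>

#define TABLE_SIZE 64

static int table_used[TABLE_SIZE];	/* toy interrupt-remapping table */

/* Grab 'nvec' consecutive free slots, in the spirit of msi_alloc_irte(). */
static int toy_alloc_irte(int nvec)
{
	int start, i;

	for (start = 0; start + nvec <= TABLE_SIZE; start++) {
		for (i = 0; i < nvec; i++)
			if (table_used[start + i])
				break;
		if (i == nvec) {
			for (i = 0; i < nvec; i++)
				table_used[start + i] = 1;
			return start;
		}
	}
	return -1;
}

int main(void)
{
	int nvec = 4, sub_handle;
	int index = toy_alloc_irte(nvec);

	for (sub_handle = 0; sub_handle < nvec; sub_handle++)
		printf("MSI vector %d -> IRTE %d\n", sub_handle, index + sub_handle);
	return 0;
}
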
2090 3289
2091void arch_teardown_msi_irq(unsigned int irq) 3290void arch_teardown_msi_irq(unsigned int irq)
@@ -2097,10 +3296,11 @@ void arch_teardown_msi_irq(unsigned int irq)
2097#ifdef CONFIG_SMP 3296#ifdef CONFIG_SMP
2098static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) 3297static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
2099{ 3298{
2100 struct irq_cfg *cfg = irq_cfg + irq; 3299 struct irq_cfg *cfg;
2101 struct msi_msg msg; 3300 struct msi_msg msg;
2102 unsigned int dest; 3301 unsigned int dest;
2103 cpumask_t tmp; 3302 cpumask_t tmp;
3303 struct irq_desc *desc;
2104 3304
2105 cpus_and(tmp, mask, cpu_online_map); 3305 cpus_and(tmp, mask, cpu_online_map);
2106 if (cpus_empty(tmp)) 3306 if (cpus_empty(tmp))
@@ -2109,6 +3309,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
2109 if (assign_irq_vector(irq, mask)) 3309 if (assign_irq_vector(irq, mask))
2110 return; 3310 return;
2111 3311
3312 cfg = irq_cfg(irq);
2112 cpus_and(tmp, cfg->domain, mask); 3313 cpus_and(tmp, cfg->domain, mask);
2113 dest = cpu_mask_to_apicid(tmp); 3314 dest = cpu_mask_to_apicid(tmp);
2114 3315
@@ -2120,7 +3321,8 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
2120 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3321 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2121 3322
2122 dmar_msi_write(irq, &msg); 3323 dmar_msi_write(irq, &msg);
2123 irq_desc[irq].affinity = mask; 3324 desc = irq_to_desc(irq);
3325 desc->affinity = mask;
2124} 3326}
2125#endif /* CONFIG_SMP */ 3327#endif /* CONFIG_SMP */
2126 3328
@@ -2150,6 +3352,69 @@ int arch_setup_dmar_msi(unsigned int irq)
2150} 3352}
2151#endif 3353#endif
2152 3354
3355#ifdef CONFIG_HPET_TIMER
3356
3357#ifdef CONFIG_SMP
3358static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
3359{
3360 struct irq_cfg *cfg;
3361 struct irq_desc *desc;
3362 struct msi_msg msg;
3363 unsigned int dest;
3364 cpumask_t tmp;
3365
3366 cpus_and(tmp, mask, cpu_online_map);
3367 if (cpus_empty(tmp))
3368 return;
3369
3370 if (assign_irq_vector(irq, mask))
3371 return;
3372
3373 cfg = irq_cfg(irq);
3374 cpus_and(tmp, cfg->domain, mask);
3375 dest = cpu_mask_to_apicid(tmp);
3376
3377 hpet_msi_read(irq, &msg);
3378
3379 msg.data &= ~MSI_DATA_VECTOR_MASK;
3380 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3381 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3382 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3383
3384 hpet_msi_write(irq, &msg);
3385 desc = irq_to_desc(irq);
3386 desc->affinity = mask;
3387}
3388#endif /* CONFIG_SMP */
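
All of these set_affinity helpers follow the same recipe: read the current MSI
message back, clear the vector field (data bits 7:0) and the destination-ID
field (address bits 19:12, per the architectural MSI layout), then patch in the
new values and write the message out. The bit surgery in isolation (the mask
names and sample values are local to this sketch):

#include <stdio.h>
#include <stdint.h>

#define DATA_VECTOR_MASK	0x000000ffu	/* MSI data bits 7:0 */
#define ADDR_DEST_ID_MASK	0x000ff000u	/* MSI address bits 19:12 */

int main(void)
{
	uint32_t data = 0x0000c039, address_lo = 0xfee01000;	/* sample message */
	uint8_t new_vector = 0x41, new_dest = 0x05;

	data &= ~DATA_VECTOR_MASK;
	data |= new_vector;
	address_lo &= ~ADDR_DEST_ID_MASK;
	address_lo |= (uint32_t)new_dest << 12;

	/* prints data=0x0000c041 address_lo=0xfee05000 */
	printf("data=0x%08x address_lo=0x%08x\n", data, address_lo);
	return 0;
}
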
3389
3390struct irq_chip hpet_msi_type = {
3391 .name = "HPET_MSI",
3392 .unmask = hpet_msi_unmask,
3393 .mask = hpet_msi_mask,
3394 .ack = ack_apic_edge,
3395#ifdef CONFIG_SMP
3396 .set_affinity = hpet_msi_set_affinity,
3397#endif
3398 .retrigger = ioapic_retrigger_irq,
3399};
3400
3401int arch_setup_hpet_msi(unsigned int irq)
3402{
3403 int ret;
3404 struct msi_msg msg;
3405
3406 ret = msi_compose_msg(NULL, irq, &msg);
3407 if (ret < 0)
3408 return ret;
3409
3410 hpet_msi_write(irq, &msg);
3411 set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
3412 "edge");
3413
3414 return 0;
3415}
3416#endif
3417
2153#endif /* CONFIG_PCI_MSI */ 3418#endif /* CONFIG_PCI_MSI */
2154/* 3419/*
2155 * Hypertransport interrupt support 3420 * Hypertransport interrupt support
@@ -2174,9 +3439,10 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
2174 3439
2175static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) 3440static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
2176{ 3441{
2177 struct irq_cfg *cfg = irq_cfg + irq; 3442 struct irq_cfg *cfg;
2178 unsigned int dest; 3443 unsigned int dest;
2179 cpumask_t tmp; 3444 cpumask_t tmp;
3445 struct irq_desc *desc;
2180 3446
2181 cpus_and(tmp, mask, cpu_online_map); 3447 cpus_and(tmp, mask, cpu_online_map);
2182 if (cpus_empty(tmp)) 3448 if (cpus_empty(tmp))
@@ -2185,11 +3451,13 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
2185 if (assign_irq_vector(irq, mask)) 3451 if (assign_irq_vector(irq, mask))
2186 return; 3452 return;
2187 3453
3454 cfg = irq_cfg(irq);
2188 cpus_and(tmp, cfg->domain, mask); 3455 cpus_and(tmp, cfg->domain, mask);
2189 dest = cpu_mask_to_apicid(tmp); 3456 dest = cpu_mask_to_apicid(tmp);
2190 3457
2191 target_ht_irq(irq, dest, cfg->vector); 3458 target_ht_irq(irq, dest, cfg->vector);
2192 irq_desc[irq].affinity = mask; 3459 desc = irq_to_desc(irq);
3460 desc->affinity = mask;
2193} 3461}
2194#endif 3462#endif
2195 3463
@@ -2206,7 +3474,7 @@ static struct irq_chip ht_irq_chip = {
2206 3474
2207int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3475int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
2208{ 3476{
2209 struct irq_cfg *cfg = irq_cfg + irq; 3477 struct irq_cfg *cfg;
2210 int err; 3478 int err;
2211 cpumask_t tmp; 3479 cpumask_t tmp;
2212 3480
@@ -2216,6 +3484,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
2216 struct ht_irq_msg msg; 3484 struct ht_irq_msg msg;
2217 unsigned dest; 3485 unsigned dest;
2218 3486
3487 cfg = irq_cfg(irq);
2219 cpus_and(tmp, cfg->domain, tmp); 3488 cpus_and(tmp, cfg->domain, tmp);
2220 dest = cpu_mask_to_apicid(tmp); 3489 dest = cpu_mask_to_apicid(tmp);
2221 3490
@@ -2238,20 +3507,196 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
2238 3507
2239 set_irq_chip_and_handler_name(irq, &ht_irq_chip, 3508 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
2240 handle_edge_irq, "edge"); 3509 handle_edge_irq, "edge");
3510
3511 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
2241 } 3512 }
2242 return err; 3513 return err;
2243} 3514}
2244#endif /* CONFIG_HT_IRQ */ 3515#endif /* CONFIG_HT_IRQ */
2245 3516
3517#ifdef CONFIG_X86_64
3518/*
3519 * Re-target the irq to the specified CPU and enable the specified MMR located
3520 * on the specified blade to allow the sending of MSIs to the specified CPU.
3521 */
3522int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3523 unsigned long mmr_offset)
3524{
3525 const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
3526 struct irq_cfg *cfg;
3527 int mmr_pnode;
3528 unsigned long mmr_value;
3529 struct uv_IO_APIC_route_entry *entry;
3530 unsigned long flags;
3531 int err;
3532
3533 err = assign_irq_vector(irq, *eligible_cpu);
3534 if (err != 0)
3535 return err;
3536
3537 spin_lock_irqsave(&vector_lock, flags);
3538 set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
3539 irq_name);
3540 spin_unlock_irqrestore(&vector_lock, flags);
3541
3542 cfg = irq_cfg(irq);
3543
3544 mmr_value = 0;
3545 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3546 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3547
3548 entry->vector = cfg->vector;
3549 entry->delivery_mode = INT_DELIVERY_MODE;
3550 entry->dest_mode = INT_DEST_MODE;
3551 entry->polarity = 0;
3552 entry->trigger = 0;
3553 entry->mask = 0;
3554 entry->dest = cpu_mask_to_apicid(*eligible_cpu);
3555
3556 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3557 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3558
3559 return irq;
3560}
3561
3562/*
3563 * Disable the specified MMR located on the specified blade so that MSIs are
3564 * no longer allowed to be sent.
3565 */
3566void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
3567{
3568 unsigned long mmr_value;
3569 struct uv_IO_APIC_route_entry *entry;
3570 int mmr_pnode;
3571
3572 mmr_value = 0;
3573 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3574 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3575
3576 entry->mask = 1;
3577
3578 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3579 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3580}
3581#endif /* CONFIG_X86_64 */
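
Both UV helpers build the routing entry in a plain unsigned long and access it
through a struct overlay, with the BUG_ON guarding the size assumption; the
enable path writes a fully populated entry, the disable path writes one with
only the mask bit set. A simplified model of that pattern (the bit layout below
is illustrative, not the real uv_IO_APIC_route_entry):

#include <stdio.h>
#include <assert.h>

struct toy_route_entry {		/* illustrative layout only */
	unsigned long vector   : 8;
	unsigned long dest     : 8;
	unsigned long mask     : 1;
	unsigned long reserved : 47;
};

int main(void)
{
	unsigned long mmr_value = 0;
	struct toy_route_entry *entry = (struct toy_route_entry *)&mmr_value;

	assert(sizeof(*entry) == sizeof(unsigned long));	/* as the BUG_ON does */

	entry->vector = 0x99;
	entry->dest = 0x05;
	printf("enable:  mmr = 0x%016lx\n", mmr_value);

	mmr_value = 0;
	entry->mask = 1;
	printf("disable: mmr = 0x%016lx\n", mmr_value);
	return 0;
}
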
3582
3583int __init io_apic_get_redir_entries (int ioapic)
3584{
3585 union IO_APIC_reg_01 reg_01;
3586 unsigned long flags;
3587
3588 spin_lock_irqsave(&ioapic_lock, flags);
3589 reg_01.raw = io_apic_read(ioapic, 1);
3590 spin_unlock_irqrestore(&ioapic_lock, flags);
3591
3592 return reg_01.bits.entries;
3593}
3594
3595int __init probe_nr_irqs(void)
3596{
3597 int idx;
3598 int nr = 0;
3599#ifndef CONFIG_XEN
3600 int nr_min = 32;
3601#else
3602 int nr_min = NR_IRQS;
3603#endif
3604
3605 for (idx = 0; idx < nr_ioapics; idx++)
3606 nr += io_apic_get_redir_entries(idx) + 1;
3607
3608 /* double it, to leave headroom for hotplug, MSI and NMI */
3609 nr <<= 1;
3610
3611 /* something wrong ? */
3612 if (nr < nr_min)
3613 nr = nr_min;
3614
3615 return nr;
3616}
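
Worked through for a typical box with two 24-pin IO-APICs: each contributes
io_apic_get_redir_entries() + 1 = 24 usable entries, so nr starts at 48, and
doubling for hotplug/MSI/NMI headroom gives 96, which already clears the
non-Xen floor of 32. The same arithmetic as a standalone check:

#include <stdio.h>

int main(void)
{
	int redir_entries[] = { 23, 23 };	/* register value per IO-APIC */
	int i, nr = 0, nr_min = 32;

	for (i = 0; i < 2; i++)
		nr += redir_entries[i] + 1;	/* 48 pins in total */
	nr <<= 1;				/* headroom: 96 */
	if (nr < nr_min)
		nr = nr_min;
	printf("nr_irqs = %d\n", nr);		/* prints 96 */
	return 0;
}
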
3617
2246/* -------------------------------------------------------------------------- 3618/* --------------------------------------------------------------------------
2247 ACPI-based IOAPIC Configuration 3619 ACPI-based IOAPIC Configuration
2248 -------------------------------------------------------------------------- */ 3620 -------------------------------------------------------------------------- */
2249 3621
2250#ifdef CONFIG_ACPI 3622#ifdef CONFIG_ACPI
2251 3623
2252#define IO_APIC_MAX_ID 0xFE 3624#ifdef CONFIG_X86_32
3625int __init io_apic_get_unique_id(int ioapic, int apic_id)
3626{
3627 union IO_APIC_reg_00 reg_00;
3628 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3629 physid_mask_t tmp;
3630 unsigned long flags;
3631 int i = 0;
2253 3632
2254int __init io_apic_get_redir_entries (int ioapic) 3633 /*
3634 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3635 * buses (one for LAPICs, one for IOAPICs), whereas predecessors only
3636 * support up to 16 on one shared APIC bus.
3637 *
3638 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3639 * advantage of new APIC bus architecture.
3640 */
3641
3642 if (physids_empty(apic_id_map))
3643 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
3644
3645 spin_lock_irqsave(&ioapic_lock, flags);
3646 reg_00.raw = io_apic_read(ioapic, 0);
3647 spin_unlock_irqrestore(&ioapic_lock, flags);
3648
3649 if (apic_id >= get_physical_broadcast()) {
3650 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3651 "%d\n", ioapic, apic_id, reg_00.bits.ID);
3652 apic_id = reg_00.bits.ID;
3653 }
3654
3655 /*
3656 * Every APIC in a system must have a unique ID or we get lots of nice
3657 * 'stuck on smp_invalidate_needed IPI wait' messages.
3658 */
3659 if (check_apicid_used(apic_id_map, apic_id)) {
3660
3661 for (i = 0; i < get_physical_broadcast(); i++) {
3662 if (!check_apicid_used(apic_id_map, i))
3663 break;
3664 }
3665
3666 if (i == get_physical_broadcast())
3667 panic("Max apic_id exceeded!\n");
3668
3669 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
3670 "trying %d\n", ioapic, apic_id, i);
3671
3672 apic_id = i;
3673 }
3674
3675 tmp = apicid_to_cpu_present(apic_id);
3676 physids_or(apic_id_map, apic_id_map, tmp);
3677
3678 if (reg_00.bits.ID != apic_id) {
3679 reg_00.bits.ID = apic_id;
3680
3681 spin_lock_irqsave(&ioapic_lock, flags);
3682 io_apic_write(ioapic, 0, reg_00.raw);
3683 reg_00.raw = io_apic_read(ioapic, 0);
3684 spin_unlock_irqrestore(&ioapic_lock, flags);
3685
3686 /* Sanity check */
3687 if (reg_00.bits.ID != apic_id) {
3688 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
3689 return -1;
3690 }
3691 }
3692
3693 apic_printk(APIC_VERBOSE, KERN_INFO
3694 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
3695
3696 return apic_id;
3697}
3698
3699int __init io_apic_get_version(int ioapic)
2255{ 3700{
2256 union IO_APIC_reg_01 reg_01; 3701 union IO_APIC_reg_01 reg_01;
2257 unsigned long flags; 3702 unsigned long flags;
@@ -2260,9 +3705,9 @@ int __init io_apic_get_redir_entries (int ioapic)
2260 reg_01.raw = io_apic_read(ioapic, 1); 3705 reg_01.raw = io_apic_read(ioapic, 1);
2261 spin_unlock_irqrestore(&ioapic_lock, flags); 3706 spin_unlock_irqrestore(&ioapic_lock, flags);
2262 3707
2263 return reg_01.bits.entries; 3708 return reg_01.bits.version;
2264} 3709}
2265 3710#endif
2266 3711
2267int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) 3712int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
2268{ 3713{
@@ -2314,6 +3759,7 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
2314void __init setup_ioapic_dest(void) 3759void __init setup_ioapic_dest(void)
2315{ 3760{
2316 int pin, ioapic, irq, irq_entry; 3761 int pin, ioapic, irq, irq_entry;
3762 struct irq_cfg *cfg;
2317 3763
2318 if (skip_ioapic_setup == 1) 3764 if (skip_ioapic_setup == 1)
2319 return; 3765 return;
@@ -2329,10 +3775,15 @@ void __init setup_ioapic_dest(void)
2329 * when you have too many devices, because at that time only boot 3775 * when you have too many devices, because at that time only boot
2330 * cpu is online. 3776 * cpu is online.
2331 */ 3777 */
2332 if (!irq_cfg[irq].vector) 3778 cfg = irq_cfg(irq);
3779 if (!cfg->vector)
2333 setup_IO_APIC_irq(ioapic, pin, irq, 3780 setup_IO_APIC_irq(ioapic, pin, irq,
2334 irq_trigger(irq_entry), 3781 irq_trigger(irq_entry),
2335 irq_polarity(irq_entry)); 3782 irq_polarity(irq_entry));
3783#ifdef CONFIG_INTR_REMAP
3784 else if (intr_remapping_enabled)
3785 set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
3786#endif
2336 else 3787 else
2337 set_ioapic_affinity_irq(irq, TARGET_CPUS); 3788 set_ioapic_affinity_irq(irq, TARGET_CPUS);
2338 } 3789 }
@@ -2383,18 +3834,33 @@ void __init ioapic_init_mappings(void)
2383 struct resource *ioapic_res; 3834 struct resource *ioapic_res;
2384 int i; 3835 int i;
2385 3836
3837 irq_2_pin_init();
2386 ioapic_res = ioapic_setup_resources(); 3838 ioapic_res = ioapic_setup_resources();
2387 for (i = 0; i < nr_ioapics; i++) { 3839 for (i = 0; i < nr_ioapics; i++) {
2388 if (smp_found_config) { 3840 if (smp_found_config) {
2389 ioapic_phys = mp_ioapics[i].mp_apicaddr; 3841 ioapic_phys = mp_ioapics[i].mp_apicaddr;
3842#ifdef CONFIG_X86_32
3843 if (!ioapic_phys) {
3844 printk(KERN_ERR
3845 "WARNING: bogus zero IO-APIC "
3846 "address found in MPTABLE, "
3847 "disabling IO/APIC support!\n");
3848 smp_found_config = 0;
3849 skip_ioapic_setup = 1;
3850 goto fake_ioapic_page;
3851 }
3852#endif
2390 } else { 3853 } else {
3854#ifdef CONFIG_X86_32
3855fake_ioapic_page:
3856#endif
2391 ioapic_phys = (unsigned long) 3857 ioapic_phys = (unsigned long)
2392 alloc_bootmem_pages(PAGE_SIZE); 3858 alloc_bootmem_pages(PAGE_SIZE);
2393 ioapic_phys = __pa(ioapic_phys); 3859 ioapic_phys = __pa(ioapic_phys);
2394 } 3860 }
2395 set_fixmap_nocache(idx, ioapic_phys); 3861 set_fixmap_nocache(idx, ioapic_phys);
2396 apic_printk(APIC_VERBOSE, 3862 apic_printk(APIC_VERBOSE,
2397 "mapped IOAPIC to %016lx (%016lx)\n", 3863 "mapped IOAPIC to %08lx (%08lx)\n",
2398 __fix_to_virt(idx), ioapic_phys); 3864 __fix_to_virt(idx), ioapic_phys);
2399 idx++; 3865 idx++;
2400 3866
@@ -2428,4 +3894,3 @@ static int __init ioapic_insert_resources(void)
2428/* Insert the IO APIC resources after PCI initialization has occurred to handle 3894
2429 * IO APICS that are mapped in on a BAR in PCI space. */ 3895 * IO APICS that are mapped in on a BAR in PCI space. */
2430late_initcall(ioapic_insert_resources); 3896late_initcall(ioapic_insert_resources);
2431
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
deleted file mode 100644
index 09cddb57bec4..000000000000
--- a/arch/x86/kernel/io_apic_32.c
+++ /dev/null
@@ -1,2901 +0,0 @@
1/*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23#include <linux/mm.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/sched.h>
28#include <linux/bootmem.h>
29#include <linux/mc146818rtc.h>
30#include <linux/compiler.h>
31#include <linux/acpi.h>
32#include <linux/module.h>
33#include <linux/sysdev.h>
34#include <linux/pci.h>
35#include <linux/msi.h>
36#include <linux/htirq.h>
37#include <linux/freezer.h>
38#include <linux/kthread.h>
39#include <linux/jiffies.h> /* time_after() */
40
41#include <asm/io.h>
42#include <asm/smp.h>
43#include <asm/desc.h>
44#include <asm/timer.h>
45#include <asm/i8259.h>
46#include <asm/nmi.h>
47#include <asm/msidef.h>
48#include <asm/hypertransport.h>
49
50#include <mach_apic.h>
51#include <mach_apicdef.h>
52
53int (*ioapic_renumber_irq)(int ioapic, int irq);
54atomic_t irq_mis_count;
55
56/* Where if anywhere is the i8259 connect in external int mode */
57static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
58
59static DEFINE_SPINLOCK(ioapic_lock);
60DEFINE_SPINLOCK(vector_lock);
61
62int timer_through_8259 __initdata;
63
64/*
65 * Is the SiS APIC rmw bug present ?
66 * -1 = don't know, 0 = no, 1 = yes
67 */
68int sis_apic_bug = -1;
69
70/*
71 * # of IRQ routing registers
72 */
73int nr_ioapic_registers[MAX_IO_APICS];
74
75/* I/O APIC entries */
76struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
77int nr_ioapics;
78
79/* MP IRQ source entries */
80struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
81
82/* # of MP IRQ source entries */
83int mp_irq_entries;
84
85#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
86int mp_bus_id_to_type[MAX_MP_BUSSES];
87#endif
88
89DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
90
91static int disable_timer_pin_1 __initdata;
92
93/*
94 * Rough estimation of how many shared IRQs there are, can
95 * be changed anytime.
96 */
97#define MAX_PLUS_SHARED_IRQS NR_IRQS
98#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
99
100/*
101 * This is performance-critical, we want to do it O(1)
102 *
103 * the indexing order of this array favors 1:1 mappings
104 * between pins and IRQs.
105 */
106
107static struct irq_pin_list {
108 int apic, pin, next;
109} irq_2_pin[PIN_MAP_SIZE];
110
111struct io_apic {
112 unsigned int index;
113 unsigned int unused[3];
114 unsigned int data;
115};
116
117static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
118{
119 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
120 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
121}
122
123static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
124{
125 struct io_apic __iomem *io_apic = io_apic_base(apic);
126 writel(reg, &io_apic->index);
127 return readl(&io_apic->data);
128}
129
130static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
131{
132 struct io_apic __iomem *io_apic = io_apic_base(apic);
133 writel(reg, &io_apic->index);
134 writel(value, &io_apic->data);
135}
136
137/*
138 * Re-write a value: to be used for read-modify-write
139 * cycles where the read already set up the index register.
140 *
141 * Older SiS APIC requires we rewrite the index register
142 */
143static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
144{
145 volatile struct io_apic __iomem *io_apic = io_apic_base(apic);
146 if (sis_apic_bug)
147 writel(reg, &io_apic->index);
148 writel(value, &io_apic->data);
149}
150
151union entry_union {
152 struct { u32 w1, w2; };
153 struct IO_APIC_route_entry entry;
154};
155
156static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
157{
158 union entry_union eu;
159 unsigned long flags;
160 spin_lock_irqsave(&ioapic_lock, flags);
161 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
162 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
163 spin_unlock_irqrestore(&ioapic_lock, flags);
164 return eu.entry;
165}
166
167/*
168 * When we write a new IO APIC routing entry, we need to write the high
169 * word first! If the mask bit in the low word is clear, we will enable
170 * the interrupt, and we need to make sure the entry is fully populated
171 * before that happens.
172 */
173static void
174__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
175{
176 union entry_union eu;
177 eu.entry = e;
178 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
179 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
180}
181
182static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
183{
184 unsigned long flags;
185 spin_lock_irqsave(&ioapic_lock, flags);
186 __ioapic_write_entry(apic, pin, e);
187 spin_unlock_irqrestore(&ioapic_lock, flags);
188}
189
190/*
191 * When we mask an IO APIC routing entry, we need to write the low
192 * word first, in order to set the mask bit before we change the
193 * high bits!
194 */
195static void ioapic_mask_entry(int apic, int pin)
196{
197 unsigned long flags;
198 union entry_union eu = { .entry.mask = 1 };
199
200 spin_lock_irqsave(&ioapic_lock, flags);
201 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
202 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
203 spin_unlock_irqrestore(&ioapic_lock, flags);
204}
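
The two orderings are mirror images: when arming an entry the high word
(destination) must land before the low word that may clear the mask bit, and
when masking, the low word must set the mask bit before the high word is
touched, so the pin is never live in a half-written state. A toy harness that
logs the register order for both paths (io_apic_write() is replaced by a
logger; the offsets and the mask bit position match the code above):

#include <stdio.h>

static void log_write(unsigned int reg, unsigned int value)
{
	printf("  write reg 0x%02x = 0x%08x\n", reg, value);
}

static void toy_write_entry(int pin, unsigned int w1, unsigned int w2)
{
	log_write(0x11 + 2 * pin, w2);		/* high word first: destination */
	log_write(0x10 + 2 * pin, w1);		/* low word last: may unmask */
}

static void toy_mask_entry(int pin)
{
	log_write(0x10 + 2 * pin, 1 << 16);	/* low word first: set mask bit */
	log_write(0x11 + 2 * pin, 0);
}

int main(void)
{
	puts("arming pin 4:");
	toy_write_entry(4, 0x0000a039, 0x01000000);
	puts("masking pin 4:");
	toy_mask_entry(4);
	return 0;
}
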
205
206/*
207 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
208 * shared ISA-space IRQs, so we have to support them. We are super
209 * fast in the common case, and fast for shared ISA-space IRQs.
210 */
211static void add_pin_to_irq(unsigned int irq, int apic, int pin)
212{
213 static int first_free_entry = NR_IRQS;
214 struct irq_pin_list *entry = irq_2_pin + irq;
215
216 while (entry->next)
217 entry = irq_2_pin + entry->next;
218
219 if (entry->pin != -1) {
220 entry->next = first_free_entry;
221 entry = irq_2_pin + entry->next;
222 if (++first_free_entry >= PIN_MAP_SIZE)
223 panic("io_apic.c: whoops");
224 }
225 entry->apic = apic;
226 entry->pin = pin;
227}
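
irq_2_pin is an array doubling as a set of singly linked lists: slots
0..NR_IRQS-1 are heads indexed directly by IRQ number, and the slots above
NR_IRQS form an overflow pool handed out through first_free_entry whenever an
IRQ gains a second pin. A compact userspace model of the add-and-walk logic
(sizes shrunk for the demo):

#include <stdio.h>

#define NR	4		/* toy NR_IRQS */
#define POOL	(2 * NR)	/* toy PIN_MAP_SIZE */

static struct { int apic, pin, next; } map[POOL];
static int first_free = NR;

static void add_pin(int irq, int apic, int pin)
{
	int e = irq;

	while (map[e].next)
		e = map[e].next;
	if (map[e].pin != -1) {		/* head taken: chain a pool slot */
		map[e].next = first_free;
		e = first_free++;
	}
	map[e].apic = apic;
	map[e].pin = pin;
}

int main(void)
{
	int i;

	for (i = 0; i < POOL; i++)
		map[i].pin = -1;
	add_pin(2, 0, 2);		/* IRQ 2 from IO-APIC 0, pin 2 */
	add_pin(2, 1, 9);		/* same IRQ shared with IO-APIC 1, pin 9 */
	for (i = 2; ; i = map[i].next) {
		printf("IRQ2 -> %d:%d\n", map[i].apic, map[i].pin);
		if (!map[i].next)
			break;
	}
	return 0;
}
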
228
229/*
230 * Reroute an IRQ to a different pin.
231 */
232static void __init replace_pin_at_irq(unsigned int irq,
233 int oldapic, int oldpin,
234 int newapic, int newpin)
235{
236 struct irq_pin_list *entry = irq_2_pin + irq;
237
238 while (1) {
239 if (entry->apic == oldapic && entry->pin == oldpin) {
240 entry->apic = newapic;
241 entry->pin = newpin;
242 }
243 if (!entry->next)
244 break;
245 entry = irq_2_pin + entry->next;
246 }
247}
248
249static void __modify_IO_APIC_irq(unsigned int irq, unsigned long enable, unsigned long disable)
250{
251 struct irq_pin_list *entry = irq_2_pin + irq;
252 unsigned int pin, reg;
253
254 for (;;) {
255 pin = entry->pin;
256 if (pin == -1)
257 break;
258 reg = io_apic_read(entry->apic, 0x10 + pin*2);
259 reg &= ~disable;
260 reg |= enable;
261 io_apic_modify(entry->apic, 0x10 + pin*2, reg);
262 if (!entry->next)
263 break;
264 entry = irq_2_pin + entry->next;
265 }
266}
267
268/* mask = 1 */
269static void __mask_IO_APIC_irq(unsigned int irq)
270{
271 __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, 0);
272}
273
274/* mask = 0 */
275static void __unmask_IO_APIC_irq(unsigned int irq)
276{
277 __modify_IO_APIC_irq(irq, 0, IO_APIC_REDIR_MASKED);
278}
279
280/* mask = 1, trigger = 0 */
281static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
282{
283 __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED,
284 IO_APIC_REDIR_LEVEL_TRIGGER);
285}
286
287/* mask = 0, trigger = 1 */
288static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
289{
290 __modify_IO_APIC_irq(irq, IO_APIC_REDIR_LEVEL_TRIGGER,
291 IO_APIC_REDIR_MASKED);
292}
293
294static void mask_IO_APIC_irq(unsigned int irq)
295{
296 unsigned long flags;
297
298 spin_lock_irqsave(&ioapic_lock, flags);
299 __mask_IO_APIC_irq(irq);
300 spin_unlock_irqrestore(&ioapic_lock, flags);
301}
302
303static void unmask_IO_APIC_irq(unsigned int irq)
304{
305 unsigned long flags;
306
307 spin_lock_irqsave(&ioapic_lock, flags);
308 __unmask_IO_APIC_irq(irq);
309 spin_unlock_irqrestore(&ioapic_lock, flags);
310}
311
312static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
313{
314 struct IO_APIC_route_entry entry;
315
316 /* Check delivery_mode to be sure we're not clearing an SMI pin */
317 entry = ioapic_read_entry(apic, pin);
318 if (entry.delivery_mode == dest_SMI)
319 return;
320
321 /*
322 * Disable it in the IO-APIC irq-routing table:
323 */
324 ioapic_mask_entry(apic, pin);
325}
326
327static void clear_IO_APIC(void)
328{
329 int apic, pin;
330
331 for (apic = 0; apic < nr_ioapics; apic++)
332 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
333 clear_IO_APIC_pin(apic, pin);
334}
335
336#ifdef CONFIG_SMP
337static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
338{
339 unsigned long flags;
340 int pin;
341 struct irq_pin_list *entry = irq_2_pin + irq;
342 unsigned int apicid_value;
343 cpumask_t tmp;
344
345 cpus_and(tmp, cpumask, cpu_online_map);
346 if (cpus_empty(tmp))
347 tmp = TARGET_CPUS;
348
349 cpus_and(cpumask, tmp, CPU_MASK_ALL);
350
351 apicid_value = cpu_mask_to_apicid(cpumask);
352 /* Prepare to do the io_apic_write */
353 apicid_value = apicid_value << 24;
354 spin_lock_irqsave(&ioapic_lock, flags);
355 for (;;) {
356 pin = entry->pin;
357 if (pin == -1)
358 break;
359 io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
360 if (!entry->next)
361 break;
362 entry = irq_2_pin + entry->next;
363 }
364 irq_desc[irq].affinity = cpumask;
365 spin_unlock_irqrestore(&ioapic_lock, flags);
366}
367
368#if defined(CONFIG_IRQBALANCE)
369# include <asm/processor.h> /* kernel_thread() */
370# include <linux/kernel_stat.h> /* kstat */
371# include <linux/slab.h> /* kmalloc() */
372# include <linux/timer.h>
373
374#define IRQBALANCE_CHECK_ARCH -999
375#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
376#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
377#define BALANCED_IRQ_MORE_DELTA (HZ/10)
378#define BALANCED_IRQ_LESS_DELTA (HZ)
379
380static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
381static int physical_balance __read_mostly;
382static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
383
384static struct irq_cpu_info {
385 unsigned long *last_irq;
386 unsigned long *irq_delta;
387 unsigned long irq;
388} irq_cpu_data[NR_CPUS];
389
390#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
391#define LAST_CPU_IRQ(cpu, irq) (irq_cpu_data[cpu].last_irq[irq])
392#define IRQ_DELTA(cpu, irq) (irq_cpu_data[cpu].irq_delta[irq])
393
394#define IDLE_ENOUGH(cpu,now) \
395 (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
396
397#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
398
399#define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i)))
400
401static cpumask_t balance_irq_affinity[NR_IRQS] = {
402 [0 ... NR_IRQS-1] = CPU_MASK_ALL
403};
404
405void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
406{
407 balance_irq_affinity[irq] = mask;
408}
409
410static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
411 unsigned long now, int direction)
412{
413 int search_idle = 1;
414 int cpu = curr_cpu;
415
416 goto inside;
417
418 do {
419 if (unlikely(cpu == curr_cpu))
420 search_idle = 0;
421inside:
422 if (direction == 1) {
423 cpu++;
424 if (cpu >= NR_CPUS)
425 cpu = 0;
426 } else {
427 cpu--;
428 if (cpu == -1)
429 cpu = NR_CPUS-1;
430 }
431 } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu, allowed_mask) ||
432 (search_idle && !IDLE_ENOUGH(cpu, now)));
433
434 return cpu;
435}
436
437static inline void balance_irq(int cpu, int irq)
438{
439 unsigned long now = jiffies;
440 cpumask_t allowed_mask;
441 unsigned int new_cpu;
442
443 if (irqbalance_disabled)
444 return;
445
446 cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
447 new_cpu = move(cpu, allowed_mask, now, 1);
448 if (cpu != new_cpu)
449 set_pending_irq(irq, cpumask_of_cpu(new_cpu));
450}
451
452static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
453{
454 int i, j;
455
456 for_each_online_cpu(i) {
457 for (j = 0; j < NR_IRQS; j++) {
458 if (!irq_desc[j].action)
459 continue;
460 /* Is it a significant load ? */
461 if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i), j) <
462 useful_load_threshold)
463 continue;
464 balance_irq(i, j);
465 }
466 }
467 balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
468 balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
469 return;
470}
471
472static void do_irq_balance(void)
473{
474 int i, j;
475 unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
476 unsigned long move_this_load = 0;
477 int max_loaded = 0, min_loaded = 0;
478 int load;
479 unsigned long useful_load_threshold = balanced_irq_interval + 10;
480 int selected_irq;
481 int tmp_loaded, first_attempt = 1;
482 unsigned long tmp_cpu_irq;
483 unsigned long imbalance = 0;
484 cpumask_t allowed_mask, target_cpu_mask, tmp;
485
486 for_each_possible_cpu(i) {
487 int package_index;
488 CPU_IRQ(i) = 0;
489 if (!cpu_online(i))
490 continue;
491 package_index = CPU_TO_PACKAGEINDEX(i);
492 for (j = 0; j < NR_IRQS; j++) {
493 unsigned long value_now, delta;
494 /* Is this an active IRQ or balancing disabled ? */
495 if (!irq_desc[j].action || irq_balancing_disabled(j))
496 continue;
497 if (package_index == i)
498 IRQ_DELTA(package_index, j) = 0;
499 /* Determine the total count per processor per IRQ */
500 value_now = (unsigned long) kstat_cpu(i).irqs[j];
501
502 /* Determine the activity per processor per IRQ */
503 delta = value_now - LAST_CPU_IRQ(i, j);
504
505 /* Update last_cpu_irq[][] for the next time */
506 LAST_CPU_IRQ(i, j) = value_now;
507
508 /* Ignore IRQs whose rate is less than the clock */
509 if (delta < useful_load_threshold)
510 continue;
511 /* update the load for the processor or package total */
512 IRQ_DELTA(package_index, j) += delta;
513
514 /* Keep track of the higher numbered sibling as well */
515 if (i != package_index)
516 CPU_IRQ(i) += delta;
517 /*
518 * We have sibling A and sibling B in the package
519 *
520 * cpu_irq[A] = load for cpu A + load for cpu B
521 * cpu_irq[B] = load for cpu B
522 */
523 CPU_IRQ(package_index) += delta;
524 }
525 }
526 /* Find the least loaded processor package */
527 for_each_online_cpu(i) {
528 if (i != CPU_TO_PACKAGEINDEX(i))
529 continue;
530 if (min_cpu_irq > CPU_IRQ(i)) {
531 min_cpu_irq = CPU_IRQ(i);
532 min_loaded = i;
533 }
534 }
535 max_cpu_irq = ULONG_MAX;
536
537tryanothercpu:
538 /*
539 * Look for heaviest loaded processor.
540 * We may come back to get the next heaviest loaded processor.
541 * Skip processors with trivial loads.
542 */
543 tmp_cpu_irq = 0;
544 tmp_loaded = -1;
545 for_each_online_cpu(i) {
546 if (i != CPU_TO_PACKAGEINDEX(i))
547 continue;
548 if (max_cpu_irq <= CPU_IRQ(i))
549 continue;
550 if (tmp_cpu_irq < CPU_IRQ(i)) {
551 tmp_cpu_irq = CPU_IRQ(i);
552 tmp_loaded = i;
553 }
554 }
555
556 if (tmp_loaded == -1) {
557 /*
558 * When there is only a small number of heavy interrupt sources,
559 * some of the cpus can end up loaded too much. We use Ingo's
560 * original approach and rotate them around.
561 */
562 if (!first_attempt && imbalance >= useful_load_threshold) {
563 rotate_irqs_among_cpus(useful_load_threshold);
564 return;
565 }
566 goto not_worth_the_effort;
567 }
568
569 first_attempt = 0; /* heaviest search */
570 max_cpu_irq = tmp_cpu_irq; /* load */
571 max_loaded = tmp_loaded; /* processor */
572 imbalance = (max_cpu_irq - min_cpu_irq) / 2;
573
574 /*
575 * If the imbalance is less than approx 10% of the max load,
576 * we are into diminishing returns - quit.
577 */
578 if (imbalance < (max_cpu_irq >> 3))
579 goto not_worth_the_effort;
580
581tryanotherirq:
582 /* if we select an IRQ to move that can't go where we want, then
583 * see if there is another one to try.
584 */
585 move_this_load = 0;
586 selected_irq = -1;
587 for (j = 0; j < NR_IRQS; j++) {
588 /* Is this an active IRQ? */
589 if (!irq_desc[j].action)
590 continue;
591 if (imbalance <= IRQ_DELTA(max_loaded, j))
592 continue;
593 /* Try to find the IRQ that is closest to the imbalance
594 * without going over.
595 */
596 if (move_this_load < IRQ_DELTA(max_loaded, j)) {
597 move_this_load = IRQ_DELTA(max_loaded, j);
598 selected_irq = j;
599 }
600 }
601 if (selected_irq == -1)
602 goto tryanothercpu;
603
604 imbalance = move_this_load;
605
606 /* For physical_balance case, we accumulated both load
607 * values in one of the siblings' cpu_irq[] slots,
608 * to use the same code for physical and logical processors
609 * as much as possible.
610 *
611 * NOTE: the cpu_irq[] array holds the sum of the load for
612 * sibling A and sibling B in the slot for the lowest numbered
613 * sibling (A), _AND_ the load for sibling B in the slot for
614 * the higher numbered sibling.
615 *
616 * We seek the least loaded sibling by making the comparison
617 * (A+B)/2 vs B
618 */
619 load = CPU_IRQ(min_loaded) >> 1;
620 for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) {
621 if (load > CPU_IRQ(j)) {
622 /* This won't change cpu_sibling_map[min_loaded] */
623 load = CPU_IRQ(j);
624 min_loaded = j;
625 }
626 }
627
628 cpus_and(allowed_mask,
629 cpu_online_map,
630 balance_irq_affinity[selected_irq]);
631 target_cpu_mask = cpumask_of_cpu(min_loaded);
632 cpus_and(tmp, target_cpu_mask, allowed_mask);
633
634 if (!cpus_empty(tmp)) {
635 /* mark for change destination */
636 set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
637
638 /* Since we made a change, come back sooner to
639 * check for more variation.
640 */
641 balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
642 balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
643 return;
644 }
645 goto tryanotherirq;
646
647not_worth_the_effort:
648 /*
649 * if we did not find an IRQ to move, then adjust the time interval
650 * upward
651 */
652 balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
653 balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
654 return;
655}
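
In numbers: if sibling A handled 600 interrupts and sibling B handled 400,
the slot for A holds 600 + 400 = 1000 while the slot for B holds 400 alone;
half of A's slot is 500 > 400, so B is picked as the less loaded sibling. The
selection rule as a two-line check:

#include <stdio.h>

int main(void)
{
	unsigned long cpu_irq_a = 600 + 400;	/* slot A: load(A) + load(B) */
	unsigned long cpu_irq_b = 400;		/* slot B: load(B) alone */
	unsigned long load = cpu_irq_a >> 1;	/* (A+B)/2 = 500 */

	printf("less loaded sibling: %c\n", load > cpu_irq_b ? 'B' : 'A');
	return 0;
}
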
656
657static int balanced_irq(void *unused)
658{
659 int i;
660 unsigned long prev_balance_time = jiffies;
661 long time_remaining = balanced_irq_interval;
662
663 /* push everything to CPU 0 to give us a starting point. */
664 for (i = 0 ; i < NR_IRQS ; i++) {
665 irq_desc[i].pending_mask = cpumask_of_cpu(0);
666 set_pending_irq(i, cpumask_of_cpu(0));
667 }
668
669 set_freezable();
670 for ( ; ; ) {
671 time_remaining = schedule_timeout_interruptible(time_remaining);
672 try_to_freeze();
673 if (time_after(jiffies,
674 prev_balance_time+balanced_irq_interval)) {
675 preempt_disable();
676 do_irq_balance();
677 prev_balance_time = jiffies;
678 time_remaining = balanced_irq_interval;
679 preempt_enable();
680 }
681 }
682 return 0;
683}
684
685static int __init balanced_irq_init(void)
686{
687 int i;
688 struct cpuinfo_x86 *c;
689 cpumask_t tmp;
690
691 cpus_shift_right(tmp, cpu_online_map, 2);
692 c = &boot_cpu_data;
693 /* When not overwritten by the command line ask subarchitecture. */
694 if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
695 irqbalance_disabled = NO_BALANCE_IRQ;
696 if (irqbalance_disabled)
697 return 0;
698
699 /* disable irqbalance completely if there is only one processor online */
700 if (num_online_cpus() < 2) {
701 irqbalance_disabled = 1;
702 return 0;
703 }
704 /*
705 * Enable physical balance only if more than 1 physical processor
706 * is present
707 */
708 if (smp_num_siblings > 1 && !cpus_empty(tmp))
709 physical_balance = 1;
710
711 for_each_online_cpu(i) {
712 irq_cpu_data[i].irq_delta = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
713 irq_cpu_data[i].last_irq = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
714 if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
715 printk(KERN_ERR "balanced_irq_init: out of memory");
716 goto failed;
717 }
718 }
719
720 printk(KERN_INFO "Starting balanced_irq\n");
721 if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd")))
722 return 0;
723 printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
724failed:
725 for_each_possible_cpu(i) {
726 kfree(irq_cpu_data[i].irq_delta);
727 irq_cpu_data[i].irq_delta = NULL;
728 kfree(irq_cpu_data[i].last_irq);
729 irq_cpu_data[i].last_irq = NULL;
730 }
731 return 0;
732}
733
734int __devinit irqbalance_disable(char *str)
735{
736 irqbalance_disabled = 1;
737 return 1;
738}
739
740__setup("noirqbalance", irqbalance_disable);
741
742late_initcall(balanced_irq_init);
743#endif /* CONFIG_IRQBALANCE */
744#endif /* CONFIG_SMP */
745
746#ifndef CONFIG_SMP
747void send_IPI_self(int vector)
748{
749 unsigned int cfg;
750
751 /*
752 * Wait for idle.
753 */
754 apic_wait_icr_idle();
755 cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
756 /*
757 * Send the IPI. The write to APIC_ICR fires this off.
758 */
759 apic_write(APIC_ICR, cfg);
760}
761#endif /* !CONFIG_SMP */
762
763
764/*
765 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
766 * specific CPU-side IRQs.
767 */
768
769#define MAX_PIRQS 8
770static int pirq_entries [MAX_PIRQS];
771static int pirqs_enabled;
772int skip_ioapic_setup;
773
774static int __init ioapic_pirq_setup(char *str)
775{
776 int i, max;
777 int ints[MAX_PIRQS+1];
778
779 get_options(str, ARRAY_SIZE(ints), ints);
780
781 for (i = 0; i < MAX_PIRQS; i++)
782 pirq_entries[i] = -1;
783
784 pirqs_enabled = 1;
785 apic_printk(APIC_VERBOSE, KERN_INFO
786 "PIRQ redirection, working around broken MP-BIOS.\n");
787 max = MAX_PIRQS;
788 if (ints[0] < MAX_PIRQS)
789 max = ints[0];
790
791 for (i = 0; i < max; i++) {
792 apic_printk(APIC_VERBOSE, KERN_DEBUG
793 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
794 /*
795 * PIRQs are mapped upside down, usually.
796 */
797 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
798 }
799 return 1;
800}
801
802__setup("pirq=", ioapic_pirq_setup);
803
804/*
805 * Find the IRQ entry number of a certain pin.
806 */
807static int find_irq_entry(int apic, int pin, int type)
808{
809 int i;
810
811 for (i = 0; i < mp_irq_entries; i++)
812 if (mp_irqs[i].mp_irqtype == type &&
813 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
814 mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
815 mp_irqs[i].mp_dstirq == pin)
816 return i;
817
818 return -1;
819}
820
821/*
822 * Find the pin to which IRQ[irq] (ISA) is connected
823 */
824static int __init find_isa_irq_pin(int irq, int type)
825{
826 int i;
827
828 for (i = 0; i < mp_irq_entries; i++) {
829 int lbus = mp_irqs[i].mp_srcbus;
830
831 if (test_bit(lbus, mp_bus_not_pci) &&
832 (mp_irqs[i].mp_irqtype == type) &&
833 (mp_irqs[i].mp_srcbusirq == irq))
834
835 return mp_irqs[i].mp_dstirq;
836 }
837 return -1;
838}
839
840static int __init find_isa_irq_apic(int irq, int type)
841{
842 int i;
843
844 for (i = 0; i < mp_irq_entries; i++) {
845 int lbus = mp_irqs[i].mp_srcbus;
846
847 if (test_bit(lbus, mp_bus_not_pci) &&
848 (mp_irqs[i].mp_irqtype == type) &&
849 (mp_irqs[i].mp_srcbusirq == irq))
850 break;
851 }
852 if (i < mp_irq_entries) {
853 int apic;
854 for (apic = 0; apic < nr_ioapics; apic++) {
855 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
856 return apic;
857 }
858 }
859
860 return -1;
861}
862
863/*
864 * Find a specific PCI IRQ entry.
865 * Not an __init, possibly needed by modules
866 */
867static int pin_2_irq(int idx, int apic, int pin);
868
869int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
870{
871 int apic, i, best_guess = -1;
872
873 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
874 "slot:%d, pin:%d.\n", bus, slot, pin);
875 if (test_bit(bus, mp_bus_not_pci)) {
876 printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
877 return -1;
878 }
879 for (i = 0; i < mp_irq_entries; i++) {
880 int lbus = mp_irqs[i].mp_srcbus;
881
882 for (apic = 0; apic < nr_ioapics; apic++)
883 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
884 mp_irqs[i].mp_dstapic == MP_APIC_ALL)
885 break;
886
887 if (!test_bit(lbus, mp_bus_not_pci) &&
888 !mp_irqs[i].mp_irqtype &&
889 (bus == lbus) &&
890 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
891 int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);
892
893 if (!(apic || IO_APIC_IRQ(irq)))
894 continue;
895
896 if (pin == (mp_irqs[i].mp_srcbusirq & 3))
897 return irq;
898 /*
899 * Use the first all-but-pin matching entry as a
900 * best-guess fuzzy result for broken mptables.
901 */
902 if (best_guess < 0)
903 best_guess = irq;
904 }
905 }
906 return best_guess;
907}
908EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
909
910/*
911 * This function is currently only a helper for the i386 SMP boot process, where
912 * we need to reprogram the ioredtbls to cater for the cpus which have come online,
913 * so the mask in all cases should simply be TARGET_CPUS.
914 */
915#ifdef CONFIG_SMP
916void __init setup_ioapic_dest(void)
917{
918 int pin, ioapic, irq, irq_entry;
919
920 if (skip_ioapic_setup == 1)
921 return;
922
923 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
924 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
925 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
926 if (irq_entry == -1)
927 continue;
928 irq = pin_2_irq(irq_entry, ioapic, pin);
929 set_ioapic_affinity_irq(irq, TARGET_CPUS);
930 }
931
932 }
933}
934#endif
935
936#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
937/*
938 * EISA Edge/Level control register, ELCR
939 */
940static int EISA_ELCR(unsigned int irq)
941{
942 if (irq < 16) {
943 unsigned int port = 0x4d0 + (irq >> 3);
944 return (inb(port) >> (irq & 7)) & 1;
945 }
946 apic_printk(APIC_VERBOSE, KERN_INFO
947 "Broken MPtable reports ISA irq %d\n", irq);
948 return 0;
949}
950#endif
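
The ELCR is a pair of 8-bit I/O ports at 0x4d0/0x4d1 with one trigger bit per
ISA IRQ: the port is selected by irq >> 3 and the bit by irq & 7, so IRQ 10
lives at port 0x4d1, bit 2. The addressing arithmetic, minus the actual inb():

#include <stdio.h>

int main(void)
{
	unsigned int irq;

	for (irq = 0; irq < 16; irq++)
		printf("IRQ %2u -> port 0x%x, bit %u\n",
		       irq, 0x4d0 + (irq >> 3), irq & 7);
	return 0;
}
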
951
952/* ISA interrupts are always polarity zero edge triggered,
953 * when listed as conforming in the MP table. */
954
955#define default_ISA_trigger(idx) (0)
956#define default_ISA_polarity(idx) (0)
957
958/* EISA interrupts are always polarity zero and can be edge or level
959 * trigger depending on the ELCR value. If an interrupt is listed as
960 * EISA conforming in the MP table, that means its trigger type must
961 * be read in from the ELCR */
962
963#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
964#define default_EISA_polarity(idx) default_ISA_polarity(idx)
965
966/* PCI interrupts are always polarity one level triggered,
967 * when listed as conforming in the MP table. */
968
969#define default_PCI_trigger(idx) (1)
970#define default_PCI_polarity(idx) (1)
971
972/* MCA interrupts are always polarity zero level triggered,
973 * when listed as conforming in the MP table. */
974
975#define default_MCA_trigger(idx) (1)
976#define default_MCA_polarity(idx) default_ISA_polarity(idx)
977
978static int MPBIOS_polarity(int idx)
979{
980 int bus = mp_irqs[idx].mp_srcbus;
981 int polarity;
982
983 /*
984 * Determine IRQ line polarity (high active or low active):
985 */
986 switch (mp_irqs[idx].mp_irqflag & 3) {
987 case 0: /* conforms, ie. bus-type dependent polarity */
988 {
989 polarity = test_bit(bus, mp_bus_not_pci)?
990 default_ISA_polarity(idx):
991 default_PCI_polarity(idx);
992 break;
993 }
994 case 1: /* high active */
995 {
996 polarity = 0;
997 break;
998 }
999 case 2: /* reserved */
1000 {
1001 printk(KERN_WARNING "broken BIOS!!\n");
1002 polarity = 1;
1003 break;
1004 }
1005 case 3: /* low active */
1006 {
1007 polarity = 1;
1008 break;
1009 }
1010 default: /* invalid */
1011 {
1012 printk(KERN_WARNING "broken BIOS!!\n");
1013 polarity = 1;
1014 break;
1015 }
1016 }
1017 return polarity;
1018}
1019
1020static int MPBIOS_trigger(int idx)
1021{
1022 int bus = mp_irqs[idx].mp_srcbus;
1023 int trigger;
1024
1025 /*
1026 * Determine IRQ trigger mode (edge or level sensitive):
1027 */
1028 switch ((mp_irqs[idx].mp_irqflag>>2) & 3) {
1029 case 0: /* conforms, ie. bus-type dependent */
1030 {
1031 trigger = test_bit(bus, mp_bus_not_pci)?
1032 default_ISA_trigger(idx):
1033 default_PCI_trigger(idx);
1034#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
1035 switch (mp_bus_id_to_type[bus]) {
1036 case MP_BUS_ISA: /* ISA pin */
1037 {
1038 /* set before the switch */
1039 break;
1040 }
1041 case MP_BUS_EISA: /* EISA pin */
1042 {
1043 trigger = default_EISA_trigger(idx);
1044 break;
1045 }
1046 case MP_BUS_PCI: /* PCI pin */
1047 {
1048 /* set before the switch */
1049 break;
1050 }
1051 case MP_BUS_MCA: /* MCA pin */
1052 {
1053 trigger = default_MCA_trigger(idx);
1054 break;
1055 }
1056 default:
1057 {
1058 printk(KERN_WARNING "broken BIOS!!\n");
1059 trigger = 1;
1060 break;
1061 }
1062 }
1063#endif
1064 break;
1065 }
1066 case 1: /* edge */
1067 {
1068 trigger = 0;
1069 break;
1070 }
1071 case 2: /* reserved */
1072 {
1073 printk(KERN_WARNING "broken BIOS!!\n");
1074 trigger = 1;
1075 break;
1076 }
1077 case 3: /* level */
1078 {
1079 trigger = 1;
1080 break;
1081 }
1082 default: /* invalid */
1083 {
1084 printk(KERN_WARNING "broken BIOS!!\n");
1085 trigger = 0;
1086 break;
1087 }
1088 }
1089 return trigger;
1090}
1091
1092static inline int irq_polarity(int idx)
1093{
1094 return MPBIOS_polarity(idx);
1095}
1096
1097static inline int irq_trigger(int idx)
1098{
1099 return MPBIOS_trigger(idx);
1100}
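
Both lookups decode the same MP-table flag word: bits 1:0 carry the polarity
code and bits 3:2 the trigger code, with 0 meaning "conforms to the bus" and
2 reserved. A standalone decoder for a few sample flag values (semantics as in
the two switch statements above):

#include <stdio.h>

static const char *pol[]  = { "conforms", "active high", "reserved", "active low" };
static const char *trig[] = { "conforms", "edge", "reserved", "level" };

int main(void)
{
	unsigned short flags[] = { 0x0, 0x5, 0xf };	/* sample mp_irqflag values */
	int i;

	for (i = 0; i < 3; i++)
		printf("flag 0x%02x: polarity=%s, trigger=%s\n", flags[i],
		       pol[flags[i] & 3], trig[(flags[i] >> 2) & 3]);
	return 0;
}
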
1101
1102static int pin_2_irq(int idx, int apic, int pin)
1103{
1104 int irq, i;
1105 int bus = mp_irqs[idx].mp_srcbus;
1106
1107 /*
1108 * Debugging check, we are in big trouble if this message pops up!
1109 */
1110 if (mp_irqs[idx].mp_dstirq != pin)
1111 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1112
1113 if (test_bit(bus, mp_bus_not_pci))
1114 irq = mp_irqs[idx].mp_srcbusirq;
1115 else {
1116 /*
1117 * PCI IRQs are mapped in order
1118 */
1119 i = irq = 0;
1120 while (i < apic)
1121 irq += nr_ioapic_registers[i++];
1122 irq += pin;
1123
1124 /*
1125 * For MPS mode, so far only needed by ES7000 platform
1126 */
1127 if (ioapic_renumber_irq)
1128 irq = ioapic_renumber_irq(apic, irq);
1129 }
1130
1131 /*
1132 * PCI IRQ command line redirection. Yes, limits are hardcoded.
1133 */
1134 if ((pin >= 16) && (pin <= 23)) {
1135 if (pirq_entries[pin-16] != -1) {
1136 if (!pirq_entries[pin-16]) {
1137 apic_printk(APIC_VERBOSE, KERN_DEBUG
1138 "disabling PIRQ%d\n", pin-16);
1139 } else {
1140 irq = pirq_entries[pin-16];
1141 apic_printk(APIC_VERBOSE, KERN_DEBUG
1142 "using PIRQ%d -> IRQ %d\n",
1143 pin-16, irq);
1144 }
1145 }
1146 }
1147 return irq;
1148}
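
For PCI sources the IRQ number is simply the pin's global position: all pins
of lower-numbered IO-APICs are counted first, then the pin offset is added.
With a 24-pin IO-APIC 0, pin 5 of IO-APIC 1 lands on IRQ 24 + 5 = 29 (absent
any ES7000 renumbering or PIRQ override). The accumulation in isolation:

#include <stdio.h>

int main(void)
{
	int nr_registers[] = { 24, 24 };	/* pins per IO-APIC */
	int apic = 1, pin = 5, i = 0, irq = 0;

	while (i < apic)
		irq += nr_registers[i++];
	irq += pin;
	printf("apic %d pin %d -> IRQ %d\n", apic, pin, irq);	/* IRQ 29 */
	return 0;
}
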
1149
1150static inline int IO_APIC_irq_trigger(int irq)
1151{
1152 int apic, idx, pin;
1153
1154 for (apic = 0; apic < nr_ioapics; apic++) {
1155 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1156 idx = find_irq_entry(apic, pin, mp_INT);
1157 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
1158 return irq_trigger(idx);
1159 }
1160 }
1161 /*
1162 * nonexistent IRQs default to edge triggered
1163 */
1164 return 0;
1165}
1166
1167/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
1168static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
1169
1170static int __assign_irq_vector(int irq)
1171{
1172 static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
1173 int vector, offset;
1174
1175 BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
1176
1177 if (irq_vector[irq] > 0)
1178 return irq_vector[irq];
1179
1180 vector = current_vector;
1181 offset = current_offset;
1182next:
1183 vector += 8;
1184 if (vector >= first_system_vector) {
1185 offset = (offset + 1) % 8;
1186 vector = FIRST_DEVICE_VECTOR + offset;
1187 }
1188 if (vector == current_vector)
1189 return -ENOSPC;
1190 if (test_and_set_bit(vector, used_vectors))
1191 goto next;
1192
1193 current_vector = vector;
1194 current_offset = offset;
1195 irq_vector[irq] = vector;
1196
1197 return vector;
1198}
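
Vectors are handed out 8 apart so that consecutive allocations land on
different IRQ priority levels; once the walk reaches first_system_vector it
wraps back to FIRST_DEVICE_VECTOR with the offset bumped, filling the skipped
slots on later passes. A runnable model of the walk (0x31 and 0xef are
plausible values for the two boundaries, not quoted from this file):

#include <stdio.h>

#define FIRST_DEVICE_VECTOR	0x31
#define FIRST_SYSTEM_VECTOR	0xef

static unsigned char used[256];		/* stands in for used_vectors */

static int alloc_vector(void)
{
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
	int vector = current_vector, offset = current_offset;

next:
	vector += 8;
	if (vector >= FIRST_SYSTEM_VECTOR) {
		offset = (offset + 1) % 8;
		vector = FIRST_DEVICE_VECTOR + offset;
	}
	if (vector == current_vector)
		return -1;		/* every vector taken */
	if (used[vector])
		goto next;
	used[vector] = 1;
	current_vector = vector;
	current_offset = offset;
	return vector;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		printf("vector 0x%02x\n", alloc_vector());	/* 0x39 0x41 ... */
	return 0;
}
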
1199
1200static int assign_irq_vector(int irq)
1201{
1202 unsigned long flags;
1203 int vector;
1204
1205 spin_lock_irqsave(&vector_lock, flags);
1206 vector = __assign_irq_vector(irq);
1207 spin_unlock_irqrestore(&vector_lock, flags);
1208
1209 return vector;
1210}
1211
1212static struct irq_chip ioapic_chip;
1213
1214#define IOAPIC_AUTO -1
1215#define IOAPIC_EDGE 0
1216#define IOAPIC_LEVEL 1
1217
1218static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
1219{
1220 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1221 trigger == IOAPIC_LEVEL) {
1222 irq_desc[irq].status |= IRQ_LEVEL;
1223 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1224 handle_fasteoi_irq, "fasteoi");
1225 } else {
1226 irq_desc[irq].status &= ~IRQ_LEVEL;
1227 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1228 handle_edge_irq, "edge");
1229 }
1230 set_intr_gate(vector, interrupt[irq]);
1231}
1232
1233static void __init setup_IO_APIC_irqs(void)
1234{
1235 struct IO_APIC_route_entry entry;
1236 int apic, pin, idx, irq, first_notcon = 1, vector;
1237
1238 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1239
1240 for (apic = 0; apic < nr_ioapics; apic++) {
1241 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1242
1243 /*
1244 * add it to the IO-APIC irq-routing table:
1245 */
1246 memset(&entry, 0, sizeof(entry));
1247
1248 entry.delivery_mode = INT_DELIVERY_MODE;
1249 entry.dest_mode = INT_DEST_MODE;
1250 entry.mask = 0; /* enable IRQ */
1251 entry.dest.logical.logical_dest =
1252 cpu_mask_to_apicid(TARGET_CPUS);
1253
1254 idx = find_irq_entry(apic, pin, mp_INT);
1255 if (idx == -1) {
1256 if (first_notcon) {
1257 apic_printk(APIC_VERBOSE, KERN_DEBUG
1258 " IO-APIC (apicid-pin) %d-%d",
1259 mp_ioapics[apic].mp_apicid,
1260 pin);
1261 first_notcon = 0;
1262 } else
1263 apic_printk(APIC_VERBOSE, ", %d-%d",
1264 mp_ioapics[apic].mp_apicid, pin);
1265 continue;
1266 }
1267
1268 if (!first_notcon) {
1269 apic_printk(APIC_VERBOSE, " not connected.\n");
1270 first_notcon = 1;
1271 }
1272
1273 entry.trigger = irq_trigger(idx);
1274 entry.polarity = irq_polarity(idx);
1275
1276 if (irq_trigger(idx)) {
1277 entry.trigger = 1;
1278 entry.mask = 1;
1279 }
1280
1281 irq = pin_2_irq(idx, apic, pin);
1282 /*
1283 * skip adding the timer int on secondary nodes, which causes
1284 * a small but painful rift in the time-space continuum
1285 */
1286 if (multi_timer_check(apic, irq))
1287 continue;
1288 else
1289 add_pin_to_irq(irq, apic, pin);
1290
1291 if (!apic && !IO_APIC_IRQ(irq))
1292 continue;
1293
1294 if (IO_APIC_IRQ(irq)) {
1295 vector = assign_irq_vector(irq);
1296 entry.vector = vector;
1297 ioapic_register_intr(irq, vector, IOAPIC_AUTO);
1298
1299 if (!apic && (irq < 16))
1300 disable_8259A_irq(irq);
1301 }
1302 ioapic_write_entry(apic, pin, entry);
1303 }
1304 }
1305
1306 if (!first_notcon)
1307 apic_printk(APIC_VERBOSE, " not connected.\n");
1308}
1309
1310/*
1311 * Set up the timer pin, possibly with the 8259A-master behind.
1312 */
1313static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1314 int vector)
1315{
1316 struct IO_APIC_route_entry entry;
1317
1318 memset(&entry, 0, sizeof(entry));
1319
1320 /*
1321 * We use logical delivery to get the timer IRQ
1322 * to the first CPU.
1323 */
1324 entry.dest_mode = INT_DEST_MODE;
1325 entry.mask = 1; /* mask IRQ now */
1326 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
1327 entry.delivery_mode = INT_DELIVERY_MODE;
1328 entry.polarity = 0;
1329 entry.trigger = 0;
1330 entry.vector = vector;
1331
1332 /*
1333 * The timer IRQ doesn't have to know that behind the
1334 * scene we may have a 8259A-master in AEOI mode ...
1335 */
1336 ioapic_register_intr(0, vector, IOAPIC_EDGE);
1337
1338 /*
1339 * Add it to the IO-APIC irq-routing table:
1340 */
1341 ioapic_write_entry(apic, pin, entry);
1342}
1343
1344void __init print_IO_APIC(void)
1345{
1346 int apic, i;
1347 union IO_APIC_reg_00 reg_00;
1348 union IO_APIC_reg_01 reg_01;
1349 union IO_APIC_reg_02 reg_02;
1350 union IO_APIC_reg_03 reg_03;
1351 unsigned long flags;
1352
1353 if (apic_verbosity == APIC_QUIET)
1354 return;
1355
1356 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1357 for (i = 0; i < nr_ioapics; i++)
1358 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1359 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
1360
1361 /*
1362 * We are a bit conservative about what we expect. We have to
1363 * know about every hardware change ASAP.
1364 */
1365 printk(KERN_INFO "testing the IO APIC.......................\n");
1366
1367 for (apic = 0; apic < nr_ioapics; apic++) {
1368
1369 spin_lock_irqsave(&ioapic_lock, flags);
1370 reg_00.raw = io_apic_read(apic, 0);
1371 reg_01.raw = io_apic_read(apic, 1);
1372 if (reg_01.bits.version >= 0x10)
1373 reg_02.raw = io_apic_read(apic, 2);
1374 if (reg_01.bits.version >= 0x20)
1375 reg_03.raw = io_apic_read(apic, 3);
1376 spin_unlock_irqrestore(&ioapic_lock, flags);
1377
1378 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1379 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1380 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1381 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1382 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1383
1384 printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
1385 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1386
1387 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1388 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1389
1390 /*
1391 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1392 * but the value of reg_02 is read as the previous read register
1393 * value, so ignore it if reg_02 == reg_01.
1394 */
1395 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1396 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1397 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1398 }
1399
1400 /*
1401 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1402 * or reg_03, but the value of reg_0[23] is read as the previous read
1403 * register value, so ignore it if reg_03 == reg_0[12].
1404 */
1405 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1406 reg_03.raw != reg_01.raw) {
1407 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1408 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1409 }
1410
1411 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1412
1413 printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
1414 " Stat Dest Deli Vect: \n");
1415
1416 for (i = 0; i <= reg_01.bits.entries; i++) {
1417 struct IO_APIC_route_entry entry;
1418
1419 entry = ioapic_read_entry(apic, i);
1420
1421 printk(KERN_DEBUG " %02x %03X %02X ",
1422 i,
1423 entry.dest.logical.logical_dest,
1424 entry.dest.physical.physical_dest
1425 );
1426
1427 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1428 entry.mask,
1429 entry.trigger,
1430 entry.irr,
1431 entry.polarity,
1432 entry.delivery_status,
1433 entry.dest_mode,
1434 entry.delivery_mode,
1435 entry.vector
1436 );
1437 }
1438 }
1439 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1440 for (i = 0; i < NR_IRQS; i++) {
1441 struct irq_pin_list *entry = irq_2_pin + i;
1442 if (entry->pin < 0)
1443 continue;
1444 printk(KERN_DEBUG "IRQ%d ", i);
1445 for (;;) {
1446 printk("-> %d:%d", entry->apic, entry->pin);
1447 if (!entry->next)
1448 break;
1449 entry = irq_2_pin + entry->next;
1450 }
1451 printk("\n");
1452 }
1453
1454 printk(KERN_INFO ".................................... done.\n");
1455
1456 return;
1457}
1458
1459#if 0
1460
1461static void print_APIC_bitfield(int base)
1462{
1463 unsigned int v;
1464 int i, j;
1465
1466 if (apic_verbosity == APIC_QUIET)
1467 return;
1468
1469 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1470 for (i = 0; i < 8; i++) {
1471 v = apic_read(base + i*0x10);
1472 for (j = 0; j < 32; j++) {
1473 if (v & (1<<j))
1474 printk("1");
1475 else
1476 printk("0");
1477 }
1478 printk("\n");
1479 }
1480}
1481
1482void /*__init*/ print_local_APIC(void *dummy)
1483{
1484 unsigned int v, ver, maxlvt;
1485
1486 if (apic_verbosity == APIC_QUIET)
1487 return;
1488
1489 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1490 smp_processor_id(), hard_smp_processor_id());
1491 v = apic_read(APIC_ID);
1492 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v,
1493 GET_APIC_ID(read_apic_id()));
1494 v = apic_read(APIC_LVR);
1495 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1496 ver = GET_APIC_VERSION(v);
1497 maxlvt = lapic_get_maxlvt();
1498
1499 v = apic_read(APIC_TASKPRI);
1500 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1501
1502 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1503 v = apic_read(APIC_ARBPRI);
1504 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1505 v & APIC_ARBPRI_MASK);
1506 v = apic_read(APIC_PROCPRI);
1507 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1508 }
1509
1510 v = apic_read(APIC_EOI);
1511 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1512 v = apic_read(APIC_RRR);
1513 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1514 v = apic_read(APIC_LDR);
1515 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1516 v = apic_read(APIC_DFR);
1517 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1518 v = apic_read(APIC_SPIV);
1519 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1520
1521 printk(KERN_DEBUG "... APIC ISR field:\n");
1522 print_APIC_bitfield(APIC_ISR);
1523 printk(KERN_DEBUG "... APIC TMR field:\n");
1524 print_APIC_bitfield(APIC_TMR);
1525 printk(KERN_DEBUG "... APIC IRR field:\n");
1526 print_APIC_bitfield(APIC_IRR);
1527
1528 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1529 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1530 apic_write(APIC_ESR, 0);
1531 v = apic_read(APIC_ESR);
1532 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1533 }
1534
1535 v = apic_read(APIC_ICR);
1536 printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
1537 v = apic_read(APIC_ICR2);
1538 printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
1539
1540 v = apic_read(APIC_LVTT);
1541 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1542
1543 if (maxlvt > 3) { /* PC is LVT#4. */
1544 v = apic_read(APIC_LVTPC);
1545 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1546 }
1547 v = apic_read(APIC_LVT0);
1548 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1549 v = apic_read(APIC_LVT1);
1550 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1551
1552 if (maxlvt > 2) { /* ERR is LVT#3. */
1553 v = apic_read(APIC_LVTERR);
1554 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1555 }
1556
1557 v = apic_read(APIC_TMICT);
1558 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1559 v = apic_read(APIC_TMCCT);
1560 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1561 v = apic_read(APIC_TDCR);
1562 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1563 printk("\n");
1564}
1565
1566void print_all_local_APICs(void)
1567{
1568 on_each_cpu(print_local_APIC, NULL, 1);
1569}
1570
1571void /*__init*/ print_PIC(void)
1572{
1573 unsigned int v;
1574 unsigned long flags;
1575
1576 if (apic_verbosity == APIC_QUIET)
1577 return;
1578
1579 printk(KERN_DEBUG "\nprinting PIC contents\n");
1580
1581 spin_lock_irqsave(&i8259A_lock, flags);
1582
1583 v = inb(0xa1) << 8 | inb(0x21);
1584 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1585
1586 v = inb(0xa0) << 8 | inb(0x20);
1587 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1588
1589 outb(0x0b, 0xa0);
1590 outb(0x0b, 0x20);
1591 v = inb(0xa0) << 8 | inb(0x20);
1592 outb(0x0a, 0xa0);
1593 outb(0x0a, 0x20);
1594
1595 spin_unlock_irqrestore(&i8259A_lock, flags);
1596
1597 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1598
1599 v = inb(0x4d1) << 8 | inb(0x4d0);
1600 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1601}
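/*
 * Sketch of the 8259A register protocol used by print_PIC() above
 * (assumption: the standard OCW3 programming model). The IMR is directly
 * readable at the data ports (0x21/0xa1), while the IRR and ISR share one
 * read port on the command ports (0x20/0xa0), selected by the last OCW3
 * written. Kernel context assumed; user space would also need ioperm().
 */
static unsigned int pic_master_isr(void)
{
	outb(0x0b, 0x20);		/* OCW3: next read returns the ISR */
	unsigned int v = inb(0x20);
	outb(0x0a, 0x20);		/* OCW3: back to IRR, the default */
	return v;
}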
1602
1603#endif /* 0 */
1604
1605static void __init enable_IO_APIC(void)
1606{
1607 union IO_APIC_reg_01 reg_01;
1608 int i8259_apic, i8259_pin;
1609 int i, apic;
1610 unsigned long flags;
1611
1612 for (i = 0; i < PIN_MAP_SIZE; i++) {
1613 irq_2_pin[i].pin = -1;
1614 irq_2_pin[i].next = 0;
1615 }
1616 if (!pirqs_enabled)
1617 for (i = 0; i < MAX_PIRQS; i++)
1618 pirq_entries[i] = -1;
1619
1620 /*
1621 * The number of IO-APIC IRQ registers (== #pins):
1622 */
1623 for (apic = 0; apic < nr_ioapics; apic++) {
1624 spin_lock_irqsave(&ioapic_lock, flags);
1625 reg_01.raw = io_apic_read(apic, 1);
1626 spin_unlock_irqrestore(&ioapic_lock, flags);
1627 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1628 }
1629 for (apic = 0; apic < nr_ioapics; apic++) {
1630 int pin;
1631 /* See if any of the pins is in ExtINT mode */
1632 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1633 struct IO_APIC_route_entry entry;
1634 entry = ioapic_read_entry(apic, pin);
1635
1636
1637 /* If the interrupt line is enabled and in ExtInt mode
1638 * I have found the pin where the i8259 is connected.
1639 */
1640 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1641 ioapic_i8259.apic = apic;
1642 ioapic_i8259.pin = pin;
1643 goto found_i8259;
1644 }
1645 }
1646 }
1647 found_i8259:
 1648 /* Look to see if the MP table has reported the ExtINT */
 1649 /* If we could not find the appropriate pin by looking at the ioapic,
 1650 * the i8259 probably is not connected to the ioapic, but give the
 1651 * mptable a chance anyway.
 1652 */
1653 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1654 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1655 /* Trust the MP table if nothing is setup in the hardware */
1656 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1657 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1658 ioapic_i8259.pin = i8259_pin;
1659 ioapic_i8259.apic = i8259_apic;
1660 }
1661 /* Complain if the MP table and the hardware disagree */
1662 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1663 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1664 {
1665 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1666 }
1667
1668 /*
1669 * Do not trust the IO-APIC being empty at bootup
1670 */
1671 clear_IO_APIC();
1672}
1673
1674/*
1675 * Not an __init, needed by the reboot code
1676 */
1677void disable_IO_APIC(void)
1678{
1679 /*
1680 * Clear the IO-APIC before rebooting:
1681 */
1682 clear_IO_APIC();
1683
1684 /*
1685 * If the i8259 is routed through an IOAPIC
1686 * Put that IOAPIC in virtual wire mode
1687 * so legacy interrupts can be delivered.
1688 */
1689 if (ioapic_i8259.pin != -1) {
1690 struct IO_APIC_route_entry entry;
1691
1692 memset(&entry, 0, sizeof(entry));
1693 entry.mask = 0; /* Enabled */
1694 entry.trigger = 0; /* Edge */
1695 entry.irr = 0;
1696 entry.polarity = 0; /* High */
1697 entry.delivery_status = 0;
1698 entry.dest_mode = 0; /* Physical */
1699 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1700 entry.vector = 0;
1701 entry.dest.physical.physical_dest =
1702 GET_APIC_ID(read_apic_id());
1703
1704 /*
1705 * Add it to the IO-APIC irq-routing table:
1706 */
1707 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1708 }
1709 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1710}
1711
1712/*
1713 * function to set the IO-APIC physical IDs based on the
1714 * values stored in the MPC table.
1715 *
1716 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1717 */
1718
1719static void __init setup_ioapic_ids_from_mpc(void)
1720{
1721 union IO_APIC_reg_00 reg_00;
1722 physid_mask_t phys_id_present_map;
1723 int apic;
1724 int i;
1725 unsigned char old_id;
1726 unsigned long flags;
1727
1728#ifdef CONFIG_X86_NUMAQ
1729 if (found_numaq)
1730 return;
1731#endif
1732
1733 /*
1734 * Don't check I/O APIC IDs for xAPIC systems. They have
1735 * no meaning without the serial APIC bus.
1736 */
1737 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1738 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
1739 return;
1740 /*
1741 * This is broken; anything with a real cpu count has to
1742 * circumvent this idiocy regardless.
1743 */
1744 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
1745
1746 /*
1747 * Set the IOAPIC ID to the value stored in the MPC table.
1748 */
1749 for (apic = 0; apic < nr_ioapics; apic++) {
1750
1751 /* Read the register 0 value */
1752 spin_lock_irqsave(&ioapic_lock, flags);
1753 reg_00.raw = io_apic_read(apic, 0);
1754 spin_unlock_irqrestore(&ioapic_lock, flags);
1755
1756 old_id = mp_ioapics[apic].mp_apicid;
1757
1758 if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
1759 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
1760 apic, mp_ioapics[apic].mp_apicid);
1761 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1762 reg_00.bits.ID);
1763 mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
1764 }
1765
1766 /*
1767 * Sanity check, is the ID really free? Every APIC in a
1768 * system must have a unique ID or we get lots of nice
1769 * 'stuck on smp_invalidate_needed IPI wait' messages.
1770 */
1771 if (check_apicid_used(phys_id_present_map,
1772 mp_ioapics[apic].mp_apicid)) {
1773 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
1774 apic, mp_ioapics[apic].mp_apicid);
1775 for (i = 0; i < get_physical_broadcast(); i++)
1776 if (!physid_isset(i, phys_id_present_map))
1777 break;
1778 if (i >= get_physical_broadcast())
1779 panic("Max APIC ID exceeded!\n");
1780 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1781 i);
1782 physid_set(i, phys_id_present_map);
1783 mp_ioapics[apic].mp_apicid = i;
1784 } else {
1785 physid_mask_t tmp;
1786 tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
1787 apic_printk(APIC_VERBOSE, "Setting %d in the "
1788 "phys_id_present_map\n",
1789 mp_ioapics[apic].mp_apicid);
1790 physids_or(phys_id_present_map, phys_id_present_map, tmp);
1791 }
1792
1793
1794 /*
1795 * We need to adjust the IRQ routing table
1796 * if the ID changed.
1797 */
1798 if (old_id != mp_ioapics[apic].mp_apicid)
1799 for (i = 0; i < mp_irq_entries; i++)
1800 if (mp_irqs[i].mp_dstapic == old_id)
1801 mp_irqs[i].mp_dstapic
1802 = mp_ioapics[apic].mp_apicid;
1803
1804 /*
1805 * Read the right value from the MPC table and
1806 * write it into the ID register.
1807 */
1808 apic_printk(APIC_VERBOSE, KERN_INFO
1809 "...changing IO-APIC physical APIC ID to %d ...",
1810 mp_ioapics[apic].mp_apicid);
1811
1812 reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
1813 spin_lock_irqsave(&ioapic_lock, flags);
1814 io_apic_write(apic, 0, reg_00.raw);
1815 spin_unlock_irqrestore(&ioapic_lock, flags);
1816
1817 /*
1818 * Sanity check
1819 */
1820 spin_lock_irqsave(&ioapic_lock, flags);
1821 reg_00.raw = io_apic_read(apic, 0);
1822 spin_unlock_irqrestore(&ioapic_lock, flags);
1823 if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
1824 printk("could not set ID!\n");
1825 else
1826 apic_printk(APIC_VERBOSE, " ok.\n");
1827 }
1828}
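/*
 * Illustrative sketch: the ID-conflict repair above is a linear scan for
 * the first clear bit in a "physical IDs present" mask. A stand-alone
 * model with a plain 64-bit mask and a hypothetical 16-ID ceiling:
 */
#include <stdint.h>

static int find_free_apic_id(uint64_t present, int max_ids)
{
	for (int i = 0; i < max_ids; i++)
		if (!(present & (1ull << i)))
			return i;	/* first unused physical ID */
	return -1;			/* caller panics: all IDs taken */
}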
1829
1830int no_timer_check __initdata;
1831
1832static int __init notimercheck(char *s)
1833{
1834 no_timer_check = 1;
1835 return 1;
1836}
1837__setup("no_timer_check", notimercheck);
1838
1839/*
1840 * There is a nasty bug in some older SMP boards, their mptable lies
1841 * about the timer IRQ. We do the following to work around the situation:
1842 *
1843 * - timer IRQ defaults to IO-APIC IRQ
1844 * - if this function detects that timer IRQs are defunct, then we fall
1845 * back to ISA timer IRQs
1846 */
1847static int __init timer_irq_works(void)
1848{
1849 unsigned long t1 = jiffies;
1850 unsigned long flags;
1851
1852 if (no_timer_check)
1853 return 1;
1854
1855 local_save_flags(flags);
1856 local_irq_enable();
1857 /* Let ten ticks pass... */
1858 mdelay((10 * 1000) / HZ);
1859 local_irq_restore(flags);
1860
1861 /*
1862 * Expect a few ticks at least, to be sure some possible
 1863 * glue logic does not lock up after the first one or two
 1864 * ticks in a non-ExtINT mode. Also the local APIC
1865 * might have cached one ExtINT interrupt. Finally, at
1866 * least one tick may be lost due to delays.
1867 */
1868 if (time_after(jiffies, t1 + 4))
1869 return 1;
1870
1871 return 0;
1872}
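/*
 * Sketch: timer_irq_works() above relies on time_after(), whose core (per
 * the well-known jiffies.h definition) is a signed subtraction, so the
 * comparison stays correct across counter wraparound. A stand-alone check:
 */
#include <assert.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long t1 = (unsigned long)-2;	/* snapshot just before wrap */
	unsigned long now = 3;			/* five ticks later, wrapped */
	assert(time_after(now, t1 + 4));	/* 3 is "after" t1 + 4 == 2 */
	return 0;
}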
1873
1874/*
1875 * In the SMP+IOAPIC case it might happen that there are an unspecified
1876 * number of pending IRQ events unhandled. These cases are very rare,
1877 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1878 * better to do it this way as thus we do not have to be aware of
1879 * 'pending' interrupts in the IRQ path, except at this point.
1880 */
1881/*
1882 * Edge triggered needs to resend any interrupt
1883 * that was delayed but this is now handled in the device
1884 * independent code.
1885 */
1886
1887/*
1888 * Startup quirk:
1889 *
 1890 * Starting up an edge-triggered IO-APIC interrupt is
 1891 * nasty - we need to make sure that we get the edge.
 1892 * If it is already asserted for some reason, we need to
 1893 * return 1 to indicate that it was pending.
1894 *
1895 * This is not complete - we should be able to fake
1896 * an edge even if it isn't on the 8259A...
1897 *
1898 * (We do this for level-triggered IRQs too - it cannot hurt.)
1899 */
1900static unsigned int startup_ioapic_irq(unsigned int irq)
1901{
1902 int was_pending = 0;
1903 unsigned long flags;
1904
1905 spin_lock_irqsave(&ioapic_lock, flags);
1906 if (irq < 16) {
1907 disable_8259A_irq(irq);
1908 if (i8259A_irq_pending(irq))
1909 was_pending = 1;
1910 }
1911 __unmask_IO_APIC_irq(irq);
1912 spin_unlock_irqrestore(&ioapic_lock, flags);
1913
1914 return was_pending;
1915}
1916
1917static void ack_ioapic_irq(unsigned int irq)
1918{
1919 move_native_irq(irq);
1920 ack_APIC_irq();
1921}
1922
1923static void ack_ioapic_quirk_irq(unsigned int irq)
1924{
1925 unsigned long v;
1926 int i;
1927
1928 move_native_irq(irq);
1929/*
1930 * It appears there is an erratum which affects at least version 0x11
1931 * of I/O APIC (that's the 82093AA and cores integrated into various
1932 * chipsets). Under certain conditions a level-triggered interrupt is
 1933 * erroneously delivered as an edge-triggered one, but the respective IRR
1934 * bit gets set nevertheless. As a result the I/O unit expects an EOI
1935 * message but it will never arrive and further interrupts are blocked
1936 * from the source. The exact reason is so far unknown, but the
1937 * phenomenon was observed when two consecutive interrupt requests
1938 * from a given source get delivered to the same CPU and the source is
1939 * temporarily disabled in between.
1940 *
1941 * A workaround is to simulate an EOI message manually. We achieve it
1942 * by setting the trigger mode to edge and then to level when the edge
1943 * trigger mode gets detected in the TMR of a local APIC for a
1944 * level-triggered interrupt. We mask the source for the time of the
1945 * operation to prevent an edge-triggered interrupt escaping meanwhile.
1946 * The idea is from Manfred Spraul. --macro
1947 */
1948 i = irq_vector[irq];
1949
1950 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
1951
1952 ack_APIC_irq();
1953
1954 if (!(v & (1 << (i & 0x1f)))) {
1955 atomic_inc(&irq_mis_count);
1956 spin_lock(&ioapic_lock);
1957 __mask_and_edge_IO_APIC_irq(irq);
1958 __unmask_and_level_IO_APIC_irq(irq);
1959 spin_unlock(&ioapic_lock);
1960 }
1961}
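/*
 * Sketch of the TMR lookup above: APIC_TMR is a bank of eight 32-bit
 * registers spaced 0x10 apart, so vector i lives in register i/32 at bit
 * i%32. The expression (i & ~0x1f) >> 1 is (i/32) * 0x10, the byte offset
 * of that register. A stand-alone check of the arithmetic:
 */
#include <assert.h>

int main(void)
{
	for (int i = 0; i < 256; i++) {
		assert(((i & ~0x1f) >> 1) == (i / 32) * 0x10);	/* reg offset */
		assert((i & 0x1f) == i % 32);			/* bit index */
	}
	return 0;
}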
1962
1963static int ioapic_retrigger_irq(unsigned int irq)
1964{
1965 send_IPI_self(irq_vector[irq]);
1966
1967 return 1;
1968}
1969
1970static struct irq_chip ioapic_chip __read_mostly = {
1971 .name = "IO-APIC",
1972 .startup = startup_ioapic_irq,
1973 .mask = mask_IO_APIC_irq,
1974 .unmask = unmask_IO_APIC_irq,
1975 .ack = ack_ioapic_irq,
1976 .eoi = ack_ioapic_quirk_irq,
1977#ifdef CONFIG_SMP
1978 .set_affinity = set_ioapic_affinity_irq,
1979#endif
1980 .retrigger = ioapic_retrigger_irq,
1981};
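/*
 * Sketch: ioapic_chip above is the classic ops-table pattern -- a bundle
 * of function pointers the generic IRQ layer invokes without knowing which
 * controller is behind them. A minimal stand-alone analogue with
 * hypothetical hooks:
 */
struct demo_chip {
	const char *name;
	void (*mask)(unsigned int irq);
	void (*unmask)(unsigned int irq);
	void (*ack)(unsigned int irq);
};

static void demo_ack(unsigned int irq) { /* hardware EOI would go here */ }

static const struct demo_chip demo = {
	.name	= "demo",
	.ack	= demo_ack,	/* unset hooks stay NULL; callers must check */
};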
1982
1983
1984static inline void init_IO_APIC_traps(void)
1985{
1986 int irq;
1987
1988 /*
1989 * NOTE! The local APIC isn't very good at handling
1990 * multiple interrupts at the same interrupt level.
1991 * As the interrupt level is determined by taking the
1992 * vector number and shifting that right by 4, we
1993 * want to spread these out a bit so that they don't
1994 * all fall in the same interrupt level.
1995 *
1996 * Also, we've got to be careful not to trash gate
1997 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1998 */
1999 for (irq = 0; irq < NR_IRQS ; irq++) {
2000 if (IO_APIC_IRQ(irq) && !irq_vector[irq]) {
2001 /*
2002 * Hmm.. We don't have an entry for this,
2003 * so default to an old-fashioned 8259
2004 * interrupt if we can..
2005 */
2006 if (irq < 16)
2007 make_8259A_irq(irq);
2008 else
2009 /* Strange. Oh, well.. */
2010 irq_desc[irq].chip = &no_irq_chip;
2011 }
2012 }
2013}
2014
2015/*
2016 * The local APIC irq-chip implementation:
2017 */
2018
2019static void ack_lapic_irq(unsigned int irq)
2020{
2021 ack_APIC_irq();
2022}
2023
2024static void mask_lapic_irq(unsigned int irq)
2025{
2026 unsigned long v;
2027
2028 v = apic_read(APIC_LVT0);
2029 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2030}
2031
2032static void unmask_lapic_irq(unsigned int irq)
2033{
2034 unsigned long v;
2035
2036 v = apic_read(APIC_LVT0);
2037 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2038}
2039
2040static struct irq_chip lapic_chip __read_mostly = {
2041 .name = "local-APIC",
2042 .mask = mask_lapic_irq,
2043 .unmask = unmask_lapic_irq,
2044 .ack = ack_lapic_irq,
2045};
2046
2047static void lapic_register_intr(int irq, int vector)
2048{
2049 irq_desc[irq].status &= ~IRQ_LEVEL;
2050 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2051 "edge");
2052 set_intr_gate(vector, interrupt[irq]);
2053}
2054
2055static void __init setup_nmi(void)
2056{
2057 /*
2058 * Dirty trick to enable the NMI watchdog ...
2059 * We put the 8259A master into AEOI mode and
2060 * unmask on all local APICs LVT0 as NMI.
2061 *
2062 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2063 * is from Maciej W. Rozycki - so we do not have to EOI from
2064 * the NMI handler or the timer interrupt.
2065 */
2066 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2067
2068 enable_NMI_through_LVT0();
2069
2070 apic_printk(APIC_VERBOSE, " done.\n");
2071}
2072
2073/*
 2074 * This looks a bit hackish, but it's about the only way of sending
2075 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2076 * not support the ExtINT mode, unfortunately. We need to send these
2077 * cycles as some i82489DX-based boards have glue logic that keeps the
2078 * 8259A interrupt line asserted until INTA. --macro
2079 */
2080static inline void __init unlock_ExtINT_logic(void)
2081{
2082 int apic, pin, i;
2083 struct IO_APIC_route_entry entry0, entry1;
2084 unsigned char save_control, save_freq_select;
2085
2086 pin = find_isa_irq_pin(8, mp_INT);
2087 if (pin == -1) {
2088 WARN_ON_ONCE(1);
2089 return;
2090 }
2091 apic = find_isa_irq_apic(8, mp_INT);
2092 if (apic == -1) {
2093 WARN_ON_ONCE(1);
2094 return;
2095 }
2096
2097 entry0 = ioapic_read_entry(apic, pin);
2098 clear_IO_APIC_pin(apic, pin);
2099
2100 memset(&entry1, 0, sizeof(entry1));
2101
2102 entry1.dest_mode = 0; /* physical delivery */
2103 entry1.mask = 0; /* unmask IRQ now */
2104 entry1.dest.physical.physical_dest = hard_smp_processor_id();
2105 entry1.delivery_mode = dest_ExtINT;
2106 entry1.polarity = entry0.polarity;
2107 entry1.trigger = 0;
2108 entry1.vector = 0;
2109
2110 ioapic_write_entry(apic, pin, entry1);
2111
2112 save_control = CMOS_READ(RTC_CONTROL);
2113 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2114 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2115 RTC_FREQ_SELECT);
2116 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2117
2118 i = 100;
2119 while (i-- > 0) {
2120 mdelay(10);
2121 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2122 i -= 10;
2123 }
2124
2125 CMOS_WRITE(save_control, RTC_CONTROL);
2126 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2127 clear_IO_APIC_pin(apic, pin);
2128
2129 ioapic_write_entry(apic, pin, entry0);
2130}
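/*
 * Sketch of the RTC programming above (assumption: standard MC146818
 * behaviour). Writing rate-select value 6 into the low nibble of the
 * frequency-select register picks a periodic interrupt of
 * 32768 >> (6 - 1) = 1024 Hz, and RTC_PIE enables it; the loop then waits
 * for roughly 100 RTC_PF ticks to flow through the fresh ExtINT route.
 */
#include <assert.h>

int main(void)
{
	int rate_select = 6;			/* value ORed into RTC_FREQ_SELECT */
	int hz = 32768 >> (rate_select - 1);	/* MC146818 periodic rate formula */
	assert(hz == 1024);
	return 0;
}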
2131
2132/*
2133 * This code may look a bit paranoid, but it's supposed to cooperate with
2134 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2135 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2136 * fanatically on his truly buggy board.
2137 */
2138static inline void __init check_timer(void)
2139{
2140 int apic1, pin1, apic2, pin2;
2141 int no_pin1 = 0;
2142 int vector;
2143 unsigned int ver;
2144 unsigned long flags;
2145
2146 local_irq_save(flags);
2147
2148 ver = apic_read(APIC_LVR);
2149 ver = GET_APIC_VERSION(ver);
2150
2151 /*
2152 * get/set the timer IRQ vector:
2153 */
2154 disable_8259A_irq(0);
2155 vector = assign_irq_vector(0);
2156 set_intr_gate(vector, interrupt[0]);
2157
2158 /*
2159 * As IRQ0 is to be enabled in the 8259A, the virtual
2160 * wire has to be disabled in the local APIC. Also
2161 * timer interrupts need to be acknowledged manually in
2162 * the 8259A for the i82489DX when using the NMI
2163 * watchdog as that APIC treats NMIs as level-triggered.
2164 * The AEOI mode will finish them in the 8259A
2165 * automatically.
2166 */
2167 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2168 init_8259A(1);
2169 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2170
2171 pin1 = find_isa_irq_pin(0, mp_INT);
2172 apic1 = find_isa_irq_apic(0, mp_INT);
2173 pin2 = ioapic_i8259.pin;
2174 apic2 = ioapic_i8259.apic;
2175
2176 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2177 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2178 vector, apic1, pin1, apic2, pin2);
2179
2180 /*
2181 * Some BIOS writers are clueless and report the ExtINTA
2182 * I/O APIC input from the cascaded 8259A as the timer
2183 * interrupt input. So just in case, if only one pin
2184 * was found above, try it both directly and through the
2185 * 8259A.
2186 */
2187 if (pin1 == -1) {
2188 pin1 = pin2;
2189 apic1 = apic2;
2190 no_pin1 = 1;
2191 } else if (pin2 == -1) {
2192 pin2 = pin1;
2193 apic2 = apic1;
2194 }
2195
2196 if (pin1 != -1) {
2197 /*
2198 * Ok, does IRQ0 through the IOAPIC work?
2199 */
2200 if (no_pin1) {
2201 add_pin_to_irq(0, apic1, pin1);
2202 setup_timer_IRQ0_pin(apic1, pin1, vector);
2203 }
2204 unmask_IO_APIC_irq(0);
2205 if (timer_irq_works()) {
2206 if (nmi_watchdog == NMI_IO_APIC) {
2207 setup_nmi();
2208 enable_8259A_irq(0);
2209 }
2210 if (disable_timer_pin_1 > 0)
2211 clear_IO_APIC_pin(0, pin1);
2212 goto out;
2213 }
2214 clear_IO_APIC_pin(apic1, pin1);
2215 if (!no_pin1)
2216 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2217 "8254 timer not connected to IO-APIC\n");
2218
2219 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2220 "(IRQ0) through the 8259A ...\n");
2221 apic_printk(APIC_QUIET, KERN_INFO
2222 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2223 /*
2224 * legacy devices should be connected to IO APIC #0
2225 */
2226 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
2227 setup_timer_IRQ0_pin(apic2, pin2, vector);
2228 unmask_IO_APIC_irq(0);
2229 enable_8259A_irq(0);
2230 if (timer_irq_works()) {
2231 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2232 timer_through_8259 = 1;
2233 if (nmi_watchdog == NMI_IO_APIC) {
2234 disable_8259A_irq(0);
2235 setup_nmi();
2236 enable_8259A_irq(0);
2237 }
2238 goto out;
2239 }
2240 /*
2241 * Cleanup, just in case ...
2242 */
2243 disable_8259A_irq(0);
2244 clear_IO_APIC_pin(apic2, pin2);
2245 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2246 }
2247
2248 if (nmi_watchdog == NMI_IO_APIC) {
2249 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2250 "through the IO-APIC - disabling NMI Watchdog!\n");
2251 nmi_watchdog = NMI_NONE;
2252 }
2253 timer_ack = 0;
2254
2255 apic_printk(APIC_QUIET, KERN_INFO
2256 "...trying to set up timer as Virtual Wire IRQ...\n");
2257
2258 lapic_register_intr(0, vector);
2259 apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
2260 enable_8259A_irq(0);
2261
2262 if (timer_irq_works()) {
2263 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2264 goto out;
2265 }
2266 disable_8259A_irq(0);
2267 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
2268 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2269
2270 apic_printk(APIC_QUIET, KERN_INFO
2271 "...trying to set up timer as ExtINT IRQ...\n");
2272
2273 init_8259A(0);
2274 make_8259A_irq(0);
2275 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2276
2277 unlock_ExtINT_logic();
2278
2279 if (timer_irq_works()) {
2280 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2281 goto out;
2282 }
2283 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2284 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2285 "report. Then try booting with the 'noapic' option.\n");
2286out:
2287 local_irq_restore(flags);
2288}
2289
2290/*
2291 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2292 * to devices. However there may be an I/O APIC pin available for
2293 * this interrupt regardless. The pin may be left unconnected, but
2294 * typically it will be reused as an ExtINT cascade interrupt for
2295 * the master 8259A. In the MPS case such a pin will normally be
2296 * reported as an ExtINT interrupt in the MP table. With ACPI
2297 * there is no provision for ExtINT interrupts, and in the absence
2298 * of an override it would be treated as an ordinary ISA I/O APIC
2299 * interrupt, that is edge-triggered and unmasked by default. We
2300 * used to do this, but it caused problems on some systems because
2301 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2302 * the same ExtINT cascade interrupt to drive the local APIC of the
2303 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2304 * the I/O APIC in all cases now. No actual device should request
2305 * it anyway. --macro
2306 */
2307#define PIC_IRQS (1 << PIC_CASCADE_IR)
2308
2309void __init setup_IO_APIC(void)
2310{
2311 int i;
2312
2313 /* Reserve all the system vectors. */
2314 for (i = first_system_vector; i < NR_VECTORS; i++)
2315 set_bit(i, used_vectors);
2316
2317 enable_IO_APIC();
2318
2319 io_apic_irqs = ~PIC_IRQS;
2320
2321 printk("ENABLING IO-APIC IRQs\n");
2322
2323 /*
2324 * Set up IO-APIC IRQ routing.
2325 */
2326 if (!acpi_ioapic)
2327 setup_ioapic_ids_from_mpc();
2328 sync_Arb_IDs();
2329 setup_IO_APIC_irqs();
2330 init_IO_APIC_traps();
2331 check_timer();
2332 if (!acpi_ioapic)
2333 print_IO_APIC();
2334}
2335
2336/*
 2337 * Called after all the initialization is done. If we didn't find any
2338 * APIC bugs then we can allow the modify fast path
2339 */
2340
2341static int __init io_apic_bug_finalize(void)
2342{
2343 if (sis_apic_bug == -1)
2344 sis_apic_bug = 0;
2345 return 0;
2346}
2347
2348late_initcall(io_apic_bug_finalize);
2349
2350struct sysfs_ioapic_data {
2351 struct sys_device dev;
2352 struct IO_APIC_route_entry entry[0];
2353};
2354static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
2355
2356static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2357{
2358 struct IO_APIC_route_entry *entry;
2359 struct sysfs_ioapic_data *data;
2360 int i;
2361
2362 data = container_of(dev, struct sysfs_ioapic_data, dev);
2363 entry = data->entry;
2364 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
2365 entry[i] = ioapic_read_entry(dev->id, i);
2366
2367 return 0;
2368}
2369
2370static int ioapic_resume(struct sys_device *dev)
2371{
2372 struct IO_APIC_route_entry *entry;
2373 struct sysfs_ioapic_data *data;
2374 unsigned long flags;
2375 union IO_APIC_reg_00 reg_00;
2376 int i;
2377
2378 data = container_of(dev, struct sysfs_ioapic_data, dev);
2379 entry = data->entry;
2380
2381 spin_lock_irqsave(&ioapic_lock, flags);
2382 reg_00.raw = io_apic_read(dev->id, 0);
2383 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
2384 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
2385 io_apic_write(dev->id, 0, reg_00.raw);
2386 }
2387 spin_unlock_irqrestore(&ioapic_lock, flags);
2388 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
2389 ioapic_write_entry(dev->id, i, entry[i]);
2390
2391 return 0;
2392}
2393
2394static struct sysdev_class ioapic_sysdev_class = {
2395 .name = "ioapic",
2396 .suspend = ioapic_suspend,
2397 .resume = ioapic_resume,
2398};
2399
2400static int __init ioapic_init_sysfs(void)
2401{
2402 struct sys_device *dev;
2403 int i, size, error = 0;
2404
2405 error = sysdev_class_register(&ioapic_sysdev_class);
2406 if (error)
2407 return error;
2408
2409 for (i = 0; i < nr_ioapics; i++) {
2410 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
2411 * sizeof(struct IO_APIC_route_entry);
2412 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
2413 if (!mp_ioapic_data[i]) {
2414 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2415 continue;
2416 }
2417 dev = &mp_ioapic_data[i]->dev;
2418 dev->id = i;
2419 dev->cls = &ioapic_sysdev_class;
2420 error = sysdev_register(dev);
2421 if (error) {
2422 kfree(mp_ioapic_data[i]);
2423 mp_ioapic_data[i] = NULL;
2424 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2425 continue;
2426 }
2427 }
2428
2429 return 0;
2430}
2431
2432device_initcall(ioapic_init_sysfs);
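/*
 * Sketch: sysfs_ioapic_data above uses the old zero-length-array idiom
 * (entry[0]) so a single kzalloc() covers the header plus a per-IOAPIC
 * number of route entries. The same sizing in stand-alone C99, using a
 * flexible array member (the modern spelling of the same trick):
 */
#include <stdlib.h>

struct vec {
	int n;
	int entry[];			/* flexible array member */
};

static struct vec *vec_alloc(int n)
{
	struct vec *v = calloc(1, sizeof(*v) + n * sizeof(v->entry[0]));
	if (v)
		v->n = n;
	return v;
}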
2433
2434/*
 2435 * Dynamic irq allocation and deallocation
2436 */
2437int create_irq(void)
2438{
2439 /* Allocate an unused irq */
2440 int irq, new, vector = 0;
2441 unsigned long flags;
2442
2443 irq = -ENOSPC;
2444 spin_lock_irqsave(&vector_lock, flags);
2445 for (new = (NR_IRQS - 1); new >= 0; new--) {
2446 if (platform_legacy_irq(new))
2447 continue;
2448 if (irq_vector[new] != 0)
2449 continue;
2450 vector = __assign_irq_vector(new);
2451 if (likely(vector > 0))
2452 irq = new;
2453 break;
2454 }
2455 spin_unlock_irqrestore(&vector_lock, flags);
2456
2457 if (irq >= 0) {
2458 set_intr_gate(vector, interrupt[irq]);
2459 dynamic_irq_init(irq);
2460 }
2461 return irq;
2462}
2463
2464void destroy_irq(unsigned int irq)
2465{
2466 unsigned long flags;
2467
2468 dynamic_irq_cleanup(irq);
2469
2470 spin_lock_irqsave(&vector_lock, flags);
2471 clear_bit(irq_vector[irq], used_vectors);
2472 irq_vector[irq] = 0;
2473 spin_unlock_irqrestore(&vector_lock, flags);
2474}
2475
2476/*
2477 * MSI message composition
2478 */
2479#ifdef CONFIG_PCI_MSI
2480static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
2481{
2482 int vector;
2483 unsigned dest;
2484
2485 vector = assign_irq_vector(irq);
2486 if (vector >= 0) {
2487 dest = cpu_mask_to_apicid(TARGET_CPUS);
2488
2489 msg->address_hi = MSI_ADDR_BASE_HI;
 2490 msg->address_lo =
 2491 	MSI_ADDR_BASE_LO |
 2492 	((INT_DEST_MODE == 0) ?
 2493 		MSI_ADDR_DEST_MODE_PHYSICAL :
 2494 		MSI_ADDR_DEST_MODE_LOGICAL) |
 2495 	((INT_DELIVERY_MODE != dest_LowestPrio) ?
 2496 		MSI_ADDR_REDIRECTION_CPU :
 2497 		MSI_ADDR_REDIRECTION_LOWPRI) |
 2498 	MSI_ADDR_DEST_ID(dest);
 2499
 2500 msg->data =
 2501 	MSI_DATA_TRIGGER_EDGE |
 2502 	MSI_DATA_LEVEL_ASSERT |
 2503 	((INT_DELIVERY_MODE != dest_LowestPrio) ?
 2504 		MSI_DATA_DELIVERY_FIXED :
 2505 		MSI_DATA_DELIVERY_LOWPRI) |
 2506 	MSI_DATA_VECTOR(vector);
2507 }
2508 return vector;
2509}
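/*
 * Sketch of the MSI message composed above (assumption: the standard x86
 * MSI format). The address selects the target -- base 0xFEE00000, the
 * destination APIC ID in bits 19:12, destination mode in bit 2 and the
 * redirection hint in bit 3 -- and the data word carries the vector in
 * bits 7:0 with the delivery mode in bits 10:8:
 */
#include <stdint.h>

static uint32_t msi_addr(uint8_t dest, int logical, int lowest_prio)
{
	return 0xfee00000u
		| ((uint32_t)dest << 12)
		| (lowest_prio ? (1u << 3) : 0)	/* redirection hint */
		| (logical ? (1u << 2) : 0);	/* destination mode */
}

static uint16_t msi_data(uint8_t vector, unsigned int delivery_mode)
{
	/* edge trigger / level assert are the all-zero trigger bits here */
	return vector | (uint16_t)((delivery_mode & 0x7) << 8);
}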
2510
2511#ifdef CONFIG_SMP
2512static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2513{
2514 struct msi_msg msg;
2515 unsigned int dest;
2516 cpumask_t tmp;
2517 int vector;
2518
2519 cpus_and(tmp, mask, cpu_online_map);
2520 if (cpus_empty(tmp))
2521 tmp = TARGET_CPUS;
2522
2523 vector = assign_irq_vector(irq);
2524 if (vector < 0)
2525 return;
2526
2527 dest = cpu_mask_to_apicid(mask);
2528
2529 read_msi_msg(irq, &msg);
2530
2531 msg.data &= ~MSI_DATA_VECTOR_MASK;
2532 msg.data |= MSI_DATA_VECTOR(vector);
2533 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2534 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2535
2536 write_msi_msg(irq, &msg);
2537 irq_desc[irq].affinity = mask;
2538}
2539#endif /* CONFIG_SMP */
2540
2541/*
2542 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
2543 * which implement the MSI or MSI-X Capability Structure.
2544 */
2545static struct irq_chip msi_chip = {
2546 .name = "PCI-MSI",
2547 .unmask = unmask_msi_irq,
2548 .mask = mask_msi_irq,
2549 .ack = ack_ioapic_irq,
2550#ifdef CONFIG_SMP
2551 .set_affinity = set_msi_irq_affinity,
2552#endif
2553 .retrigger = ioapic_retrigger_irq,
2554};
2555
2556int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
2557{
2558 struct msi_msg msg;
2559 int irq, ret;
2560 irq = create_irq();
2561 if (irq < 0)
2562 return irq;
2563
2564 ret = msi_compose_msg(dev, irq, &msg);
2565 if (ret < 0) {
2566 destroy_irq(irq);
2567 return ret;
2568 }
2569
2570 set_irq_msi(irq, desc);
2571 write_msi_msg(irq, &msg);
2572
2573 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
2574 "edge");
2575
2576 return 0;
2577}
2578
2579void arch_teardown_msi_irq(unsigned int irq)
2580{
2581 destroy_irq(irq);
2582}
2583
2584#endif /* CONFIG_PCI_MSI */
2585
2586/*
2587 * Hypertransport interrupt support
2588 */
2589#ifdef CONFIG_HT_IRQ
2590
2591#ifdef CONFIG_SMP
2592
2593static void target_ht_irq(unsigned int irq, unsigned int dest)
2594{
2595 struct ht_irq_msg msg;
2596 fetch_ht_irq_msg(irq, &msg);
2597
2598 msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
2599 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
2600
2601 msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
2602 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
2603
2604 write_ht_irq_msg(irq, &msg);
2605}
2606
2607static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
2608{
2609 unsigned int dest;
2610 cpumask_t tmp;
2611
2612 cpus_and(tmp, mask, cpu_online_map);
2613 if (cpus_empty(tmp))
2614 tmp = TARGET_CPUS;
2615
2616 cpus_and(mask, tmp, CPU_MASK_ALL);
2617
2618 dest = cpu_mask_to_apicid(mask);
2619
2620 target_ht_irq(irq, dest);
2621 irq_desc[irq].affinity = mask;
2622}
2623#endif
2624
2625static struct irq_chip ht_irq_chip = {
2626 .name = "PCI-HT",
2627 .mask = mask_ht_irq,
2628 .unmask = unmask_ht_irq,
2629 .ack = ack_ioapic_irq,
2630#ifdef CONFIG_SMP
2631 .set_affinity = set_ht_irq_affinity,
2632#endif
2633 .retrigger = ioapic_retrigger_irq,
2634};
2635
2636int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
2637{
2638 int vector;
2639
2640 vector = assign_irq_vector(irq);
2641 if (vector >= 0) {
2642 struct ht_irq_msg msg;
2643 unsigned dest;
2644 cpumask_t tmp;
2645
2646 cpus_clear(tmp);
2647 cpu_set(vector >> 8, tmp);
2648 dest = cpu_mask_to_apicid(tmp);
2649
2650 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
2651
2652 msg.address_lo =
2653 HT_IRQ_LOW_BASE |
2654 HT_IRQ_LOW_DEST_ID(dest) |
2655 HT_IRQ_LOW_VECTOR(vector) |
2656 ((INT_DEST_MODE == 0) ?
2657 HT_IRQ_LOW_DM_PHYSICAL :
2658 HT_IRQ_LOW_DM_LOGICAL) |
2659 HT_IRQ_LOW_RQEOI_EDGE |
2660 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2661 HT_IRQ_LOW_MT_FIXED :
2662 HT_IRQ_LOW_MT_ARBITRATED) |
2663 HT_IRQ_LOW_IRQ_MASKED;
2664
2665 write_ht_irq_msg(irq, &msg);
2666
2667 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
2668 handle_edge_irq, "edge");
2669 }
2670 return vector;
2671}
2672#endif /* CONFIG_HT_IRQ */
2673
2674/* --------------------------------------------------------------------------
2675 ACPI-based IOAPIC Configuration
2676 -------------------------------------------------------------------------- */
2677
2678#ifdef CONFIG_ACPI
2679
2680int __init io_apic_get_unique_id(int ioapic, int apic_id)
2681{
2682 union IO_APIC_reg_00 reg_00;
2683 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
2684 physid_mask_t tmp;
2685 unsigned long flags;
2686 int i = 0;
2687
2688 /*
 2689 * The P4 platform supports up to 256 APIC IDs on two separate APIC
 2690 * buses (one for LAPICs, one for IOAPICs), whereas its predecessors
 2691 * support only up to 16 on one shared APIC bus.
 2692 *
 2693 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
 2694 * advantage of the new APIC bus architecture.
2695 */
2696
2697 if (physids_empty(apic_id_map))
2698 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
2699
2700 spin_lock_irqsave(&ioapic_lock, flags);
2701 reg_00.raw = io_apic_read(ioapic, 0);
2702 spin_unlock_irqrestore(&ioapic_lock, flags);
2703
2704 if (apic_id >= get_physical_broadcast()) {
2705 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
2706 "%d\n", ioapic, apic_id, reg_00.bits.ID);
2707 apic_id = reg_00.bits.ID;
2708 }
2709
2710 /*
2711 * Every APIC in a system must have a unique ID or we get lots of nice
2712 * 'stuck on smp_invalidate_needed IPI wait' messages.
2713 */
2714 if (check_apicid_used(apic_id_map, apic_id)) {
2715
2716 for (i = 0; i < get_physical_broadcast(); i++) {
2717 if (!check_apicid_used(apic_id_map, i))
2718 break;
2719 }
2720
2721 if (i == get_physical_broadcast())
2722 panic("Max apic_id exceeded!\n");
2723
2724 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
2725 "trying %d\n", ioapic, apic_id, i);
2726
2727 apic_id = i;
2728 }
2729
2730 tmp = apicid_to_cpu_present(apic_id);
2731 physids_or(apic_id_map, apic_id_map, tmp);
2732
2733 if (reg_00.bits.ID != apic_id) {
2734 reg_00.bits.ID = apic_id;
2735
2736 spin_lock_irqsave(&ioapic_lock, flags);
2737 io_apic_write(ioapic, 0, reg_00.raw);
2738 reg_00.raw = io_apic_read(ioapic, 0);
2739 spin_unlock_irqrestore(&ioapic_lock, flags);
2740
2741 /* Sanity check */
2742 if (reg_00.bits.ID != apic_id) {
2743 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
2744 return -1;
2745 }
2746 }
2747
2748 apic_printk(APIC_VERBOSE, KERN_INFO
2749 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
2750
2751 return apic_id;
2752}
2753
2754
2755int __init io_apic_get_version(int ioapic)
2756{
2757 union IO_APIC_reg_01 reg_01;
2758 unsigned long flags;
2759
2760 spin_lock_irqsave(&ioapic_lock, flags);
2761 reg_01.raw = io_apic_read(ioapic, 1);
2762 spin_unlock_irqrestore(&ioapic_lock, flags);
2763
2764 return reg_01.bits.version;
2765}
2766
2767
2768int __init io_apic_get_redir_entries(int ioapic)
2769{
2770 union IO_APIC_reg_01 reg_01;
2771 unsigned long flags;
2772
2773 spin_lock_irqsave(&ioapic_lock, flags);
2774 reg_01.raw = io_apic_read(ioapic, 1);
2775 spin_unlock_irqrestore(&ioapic_lock, flags);
2776
2777 return reg_01.bits.entries;
2778}
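/*
 * Sketch: the reg_01 accesses above go through a union that overlays a
 * bitfield view on the raw 32-bit register value. A stand-alone analogue
 * with hypothetical field widths (the real layouts live in io_apic.h;
 * bitfield ordering is compiler-dependent, which is tolerable here because
 * the kernel only targets known ABIs):
 */
#include <stdint.h>

union demo_reg_01 {
	uint32_t raw;			/* value as read from the chip */
	struct {
		uint32_t version  : 8;
		uint32_t reserved : 8;
		uint32_t entries  : 8;	/* max redirection entry index */
		uint32_t rest     : 8;
	} bits;
};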
2779
2780
2781int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low)
2782{
2783 struct IO_APIC_route_entry entry;
2784
2785 if (!IO_APIC_IRQ(irq)) {
2786 printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
2787 ioapic);
2788 return -EINVAL;
2789 }
2790
2791 /*
2792 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
2793 * Note that we mask (disable) IRQs now -- these get enabled when the
2794 * corresponding device driver registers for this IRQ.
2795 */
2796
2797 memset(&entry, 0, sizeof(entry));
2798
2799 entry.delivery_mode = INT_DELIVERY_MODE;
2800 entry.dest_mode = INT_DEST_MODE;
2801 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
2802 entry.trigger = edge_level;
2803 entry.polarity = active_high_low;
2804 entry.mask = 1;
2805
2806 /*
2807 * IRQs < 16 are already in the irq_2_pin[] map
2808 */
2809 if (irq >= 16)
2810 add_pin_to_irq(irq, ioapic, pin);
2811
2812 entry.vector = assign_irq_vector(irq);
2813
2814 apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
2815 "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
2816 mp_ioapics[ioapic].mp_apicid, pin, entry.vector, irq,
2817 edge_level, active_high_low);
2818
2819 ioapic_register_intr(irq, entry.vector, edge_level);
2820
2821 if (!ioapic && (irq < 16))
2822 disable_8259A_irq(irq);
2823
2824 ioapic_write_entry(ioapic, pin, entry);
2825
2826 return 0;
2827}
2828
2829int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
2830{
2831 int i;
2832
2833 if (skip_ioapic_setup)
2834 return -1;
2835
2836 for (i = 0; i < mp_irq_entries; i++)
2837 if (mp_irqs[i].mp_irqtype == mp_INT &&
2838 mp_irqs[i].mp_srcbusirq == bus_irq)
2839 break;
2840 if (i >= mp_irq_entries)
2841 return -1;
2842
2843 *trigger = irq_trigger(i);
2844 *polarity = irq_polarity(i);
2845 return 0;
2846}
2847
2848#endif /* CONFIG_ACPI */
2849
2850static int __init parse_disable_timer_pin_1(char *arg)
2851{
2852 disable_timer_pin_1 = 1;
2853 return 0;
2854}
2855early_param("disable_timer_pin_1", parse_disable_timer_pin_1);
2856
2857static int __init parse_enable_timer_pin_1(char *arg)
2858{
2859 disable_timer_pin_1 = -1;
2860 return 0;
2861}
2862early_param("enable_timer_pin_1", parse_enable_timer_pin_1);
2863
2864static int __init parse_noapic(char *arg)
2865{
2866 /* disable IO-APIC */
2867 disable_ioapic_setup();
2868 return 0;
2869}
2870early_param("noapic", parse_noapic);
2871
2872void __init ioapic_init_mappings(void)
2873{
2874 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
2875 int i;
2876
2877 for (i = 0; i < nr_ioapics; i++) {
2878 if (smp_found_config) {
2879 ioapic_phys = mp_ioapics[i].mp_apicaddr;
2880 if (!ioapic_phys) {
2881 printk(KERN_ERR
2882 "WARNING: bogus zero IO-APIC "
2883 "address found in MPTABLE, "
2884 "disabling IO/APIC support!\n");
2885 smp_found_config = 0;
2886 skip_ioapic_setup = 1;
2887 goto fake_ioapic_page;
2888 }
2889 } else {
2890fake_ioapic_page:
2891 ioapic_phys = (unsigned long)
2892 alloc_bootmem_pages(PAGE_SIZE);
2893 ioapic_phys = __pa(ioapic_phys);
2894 }
2895 set_fixmap_nocache(idx, ioapic_phys);
2896 printk(KERN_DEBUG "mapped IOAPIC to %08lx (%08lx)\n",
2897 __fix_to_virt(idx), ioapic_phys);
2898 idx++;
2899 }
2900}
2901
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 50e5e4a31c85..191914302744 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/thread_info.h> 15#include <linux/thread_info.h>
16#include <linux/syscalls.h> 16#include <linux/syscalls.h>
17#include <asm/syscalls.h>
17 18
18/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ 19/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
19static void set_bitmap(unsigned long *bitmap, unsigned int base, 20static void set_bitmap(unsigned long *bitmap, unsigned int base,
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index 3f7537b669d3..f1c688e46f35 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -20,6 +20,8 @@
20 20
21#ifdef CONFIG_X86_32 21#ifdef CONFIG_X86_32
22#include <mach_apic.h> 22#include <mach_apic.h>
23#include <mach_ipi.h>
24
23/* 25/*
24 * the following functions deal with sending IPIs between CPUs. 26 * the following functions deal with sending IPIs between CPUs.
25 * 27 *
@@ -147,7 +149,6 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
147} 149}
148 150
149/* must come after the send_IPI functions above for inlining */ 151/* must come after the send_IPI functions above for inlining */
150#include <mach_ipi.h>
151static int convert_apicid_to_cpu(int apic_id) 152static int convert_apicid_to_cpu(int apic_id)
152{ 153{
153 int i; 154 int i;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
new file mode 100644
index 000000000000..ccf6c503fc3b
--- /dev/null
+++ b/arch/x86/kernel/irq.c
@@ -0,0 +1,189 @@
1/*
2 * Common interrupt code for 32 and 64 bit
3 */
4#include <linux/cpu.h>
5#include <linux/interrupt.h>
6#include <linux/kernel_stat.h>
7#include <linux/seq_file.h>
8
9#include <asm/apic.h>
10#include <asm/io_apic.h>
11#include <asm/smp.h>
12
13atomic_t irq_err_count;
14
15/*
 16 * 'What should we do if we get a hw irq event on an illegal vector?'
 17 * Each architecture has to answer this itself.
18 */
19void ack_bad_irq(unsigned int irq)
20{
21 printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
22
23#ifdef CONFIG_X86_LOCAL_APIC
24 /*
25 * Currently unexpected vectors happen only on SMP and APIC.
26 * We _must_ ack these because every local APIC has only N
27 * irq slots per priority level, and a 'hanging, unacked' IRQ
28 * holds up an irq slot - in excessive cases (when multiple
29 * unexpected vectors occur) that might lock up the APIC
30 * completely.
31 * But only ack when the APIC is enabled -AK
32 */
33 if (cpu_has_apic)
34 ack_APIC_irq();
35#endif
36}
37
38#ifdef CONFIG_X86_32
39# define irq_stats(x) (&per_cpu(irq_stat,x))
40#else
41# define irq_stats(x) cpu_pda(x)
42#endif
43/*
44 * /proc/interrupts printing:
45 */
46static int show_other_interrupts(struct seq_file *p)
47{
48 int j;
49
50 seq_printf(p, "NMI: ");
51 for_each_online_cpu(j)
52 seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
53 seq_printf(p, " Non-maskable interrupts\n");
54#ifdef CONFIG_X86_LOCAL_APIC
55 seq_printf(p, "LOC: ");
56 for_each_online_cpu(j)
57 seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
58 seq_printf(p, " Local timer interrupts\n");
59#endif
60#ifdef CONFIG_SMP
61 seq_printf(p, "RES: ");
62 for_each_online_cpu(j)
63 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
64 seq_printf(p, " Rescheduling interrupts\n");
65 seq_printf(p, "CAL: ");
66 for_each_online_cpu(j)
67 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
68 seq_printf(p, " Function call interrupts\n");
69 seq_printf(p, "TLB: ");
70 for_each_online_cpu(j)
71 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
72 seq_printf(p, " TLB shootdowns\n");
73#endif
74#ifdef CONFIG_X86_MCE
75 seq_printf(p, "TRM: ");
76 for_each_online_cpu(j)
77 seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
78 seq_printf(p, " Thermal event interrupts\n");
79# ifdef CONFIG_X86_64
80 seq_printf(p, "THR: ");
81 for_each_online_cpu(j)
82 seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
83 seq_printf(p, " Threshold APIC interrupts\n");
84# endif
85#endif
86#ifdef CONFIG_X86_LOCAL_APIC
87 seq_printf(p, "SPU: ");
88 for_each_online_cpu(j)
89 seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
90 seq_printf(p, " Spurious interrupts\n");
91#endif
92 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
93#if defined(CONFIG_X86_IO_APIC)
94 seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
95#endif
96 return 0;
97}
98
99int show_interrupts(struct seq_file *p, void *v)
100{
101 unsigned long flags, any_count = 0;
102 int i = *(loff_t *) v, j;
103 struct irqaction *action;
104 struct irq_desc *desc;
105
106 if (i > nr_irqs)
107 return 0;
108
109 if (i == nr_irqs)
110 return show_other_interrupts(p);
111
112 /* print header */
113 if (i == 0) {
114 seq_printf(p, " ");
115 for_each_online_cpu(j)
116 seq_printf(p, "CPU%-8d",j);
117 seq_putc(p, '\n');
118 }
119
120 desc = irq_to_desc(i);
121 spin_lock_irqsave(&desc->lock, flags);
122#ifndef CONFIG_SMP
123 any_count = kstat_irqs(i);
124#else
125 for_each_online_cpu(j)
126 any_count |= kstat_irqs_cpu(i, j);
127#endif
128 action = desc->action;
129 if (!action && !any_count)
130 goto out;
131
132 seq_printf(p, "%3d: ", i);
133#ifndef CONFIG_SMP
134 seq_printf(p, "%10u ", kstat_irqs(i));
135#else
136 for_each_online_cpu(j)
137 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
138#endif
139 seq_printf(p, " %8s", desc->chip->name);
140 seq_printf(p, "-%-8s", desc->name);
141
142 if (action) {
143 seq_printf(p, " %s", action->name);
144 while ((action = action->next) != NULL)
145 seq_printf(p, ", %s", action->name);
146 }
147
148 seq_putc(p, '\n');
149out:
150 spin_unlock_irqrestore(&desc->lock, flags);
151 return 0;
152}
153
154/*
155 * /proc/stat helpers
156 */
157u64 arch_irq_stat_cpu(unsigned int cpu)
158{
159 u64 sum = irq_stats(cpu)->__nmi_count;
160
161#ifdef CONFIG_X86_LOCAL_APIC
162 sum += irq_stats(cpu)->apic_timer_irqs;
163#endif
164#ifdef CONFIG_SMP
165 sum += irq_stats(cpu)->irq_resched_count;
166 sum += irq_stats(cpu)->irq_call_count;
167 sum += irq_stats(cpu)->irq_tlb_count;
168#endif
169#ifdef CONFIG_X86_MCE
170 sum += irq_stats(cpu)->irq_thermal_count;
171# ifdef CONFIG_X86_64
172 sum += irq_stats(cpu)->irq_threshold_count;
173#endif
174#endif
175#ifdef CONFIG_X86_LOCAL_APIC
176 sum += irq_stats(cpu)->irq_spurious_count;
177#endif
178 return sum;
179}
180
181u64 arch_irq_stat(void)
182{
183 u64 sum = atomic_read(&irq_err_count);
184
185#ifdef CONFIG_X86_IO_APIC
186 sum += atomic_read(&irq_mis_count);
187#endif
188 return sum;
189}
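/*
 * Sketch: the irq_stats() macro in the new irq.c above hides where the
 * per-cpu counters live (the per_cpu irq_stat on 32-bit, the PDA on
 * 64-bit) so the /proc code can be shared. A stand-alone analogue of the
 * idea, with hypothetical storage:
 */
struct demo_stats { unsigned int nmi, timer; };
static struct demo_stats demo_stat[4];		/* one slot per CPU */
#define demo_stats(cpu)	(&demo_stat[(cpu)])

static unsigned int demo_sum_nmi(int ncpus)
{
	unsigned int sum = 0;
	for (int cpu = 0; cpu < ncpus; cpu++)
		sum += demo_stats(cpu)->nmi;	/* same accessor everywhere */
	return sum;
}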
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 1cf8c1fcc088..a51382672de0 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -25,29 +25,6 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25DEFINE_PER_CPU(struct pt_regs *, irq_regs); 25DEFINE_PER_CPU(struct pt_regs *, irq_regs);
26EXPORT_PER_CPU_SYMBOL(irq_regs); 26EXPORT_PER_CPU_SYMBOL(irq_regs);
27 27
28/*
29 * 'what should we do if we get a hw irq event on an illegal vector'.
30 * each architecture has to answer this themselves.
31 */
32void ack_bad_irq(unsigned int irq)
33{
34 printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
35
36#ifdef CONFIG_X86_LOCAL_APIC
37 /*
38 * Currently unexpected vectors happen only on SMP and APIC.
39 * We _must_ ack these because every local APIC has only N
40 * irq slots per priority level, and a 'hanging, unacked' IRQ
41 * holds up an irq slot - in excessive cases (when multiple
42 * unexpected vectors occur) that might lock up the APIC
43 * completely.
44 * But only ack when the APIC is enabled -AK
45 */
46 if (cpu_has_apic)
47 ack_APIC_irq();
48#endif
49}
50
51#ifdef CONFIG_DEBUG_STACKOVERFLOW 28#ifdef CONFIG_DEBUG_STACKOVERFLOW
52/* Debugging check for stack overflow: is there less than 1KB free? */ 29/* Debugging check for stack overflow: is there less than 1KB free? */
53static int check_stack_overflow(void) 30static int check_stack_overflow(void)
@@ -223,20 +200,25 @@ unsigned int do_IRQ(struct pt_regs *regs)
223{ 200{
224 struct pt_regs *old_regs; 201 struct pt_regs *old_regs;
225 /* high bit used in ret_from_ code */ 202 /* high bit used in ret_from_ code */
226 int overflow, irq = ~regs->orig_ax; 203 int overflow;
227 struct irq_desc *desc = irq_desc + irq; 204 unsigned vector = ~regs->orig_ax;
205 struct irq_desc *desc;
206 unsigned irq;
228 207
229 if (unlikely((unsigned)irq >= NR_IRQS)) {
230 printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
231 __func__, irq);
232 BUG();
233 }
234 208
235 old_regs = set_irq_regs(regs); 209 old_regs = set_irq_regs(regs);
236 irq_enter(); 210 irq_enter();
211 irq = __get_cpu_var(vector_irq)[vector];
237 212
238 overflow = check_stack_overflow(); 213 overflow = check_stack_overflow();
239 214
215 desc = irq_to_desc(irq);
216 if (unlikely(!desc)) {
217 printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n",
218 __func__, irq, vector, smp_processor_id());
219 BUG();
220 }
221
240 if (!execute_on_irq_stack(overflow, desc, irq)) { 222 if (!execute_on_irq_stack(overflow, desc, irq)) {
241 if (unlikely(overflow)) 223 if (unlikely(overflow))
242 print_stack_overflow(); 224 print_stack_overflow();
@@ -248,146 +230,6 @@ unsigned int do_IRQ(struct pt_regs *regs)
248 return 1; 230 return 1;
249} 231}
250 232
251/*
252 * Interrupt statistics:
253 */
254
255atomic_t irq_err_count;
256
257/*
258 * /proc/interrupts printing:
259 */
260
261int show_interrupts(struct seq_file *p, void *v)
262{
263 int i = *(loff_t *) v, j;
264 struct irqaction * action;
265 unsigned long flags;
266
267 if (i == 0) {
268 seq_printf(p, " ");
269 for_each_online_cpu(j)
270 seq_printf(p, "CPU%-8d",j);
271 seq_putc(p, '\n');
272 }
273
274 if (i < NR_IRQS) {
275 unsigned any_count = 0;
276
277 spin_lock_irqsave(&irq_desc[i].lock, flags);
278#ifndef CONFIG_SMP
279 any_count = kstat_irqs(i);
280#else
281 for_each_online_cpu(j)
282 any_count |= kstat_cpu(j).irqs[i];
283#endif
284 action = irq_desc[i].action;
285 if (!action && !any_count)
286 goto skip;
287 seq_printf(p, "%3d: ",i);
288#ifndef CONFIG_SMP
289 seq_printf(p, "%10u ", kstat_irqs(i));
290#else
291 for_each_online_cpu(j)
292 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
293#endif
294 seq_printf(p, " %8s", irq_desc[i].chip->name);
295 seq_printf(p, "-%-8s", irq_desc[i].name);
296
297 if (action) {
298 seq_printf(p, " %s", action->name);
299 while ((action = action->next) != NULL)
300 seq_printf(p, ", %s", action->name);
301 }
302
303 seq_putc(p, '\n');
304skip:
305 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
306 } else if (i == NR_IRQS) {
307 seq_printf(p, "NMI: ");
308 for_each_online_cpu(j)
309 seq_printf(p, "%10u ", nmi_count(j));
310 seq_printf(p, " Non-maskable interrupts\n");
311#ifdef CONFIG_X86_LOCAL_APIC
312 seq_printf(p, "LOC: ");
313 for_each_online_cpu(j)
314 seq_printf(p, "%10u ",
315 per_cpu(irq_stat,j).apic_timer_irqs);
316 seq_printf(p, " Local timer interrupts\n");
317#endif
318#ifdef CONFIG_SMP
319 seq_printf(p, "RES: ");
320 for_each_online_cpu(j)
321 seq_printf(p, "%10u ",
322 per_cpu(irq_stat,j).irq_resched_count);
323 seq_printf(p, " Rescheduling interrupts\n");
324 seq_printf(p, "CAL: ");
325 for_each_online_cpu(j)
326 seq_printf(p, "%10u ",
327 per_cpu(irq_stat,j).irq_call_count);
328 seq_printf(p, " function call interrupts\n");
329 seq_printf(p, "TLB: ");
330 for_each_online_cpu(j)
331 seq_printf(p, "%10u ",
332 per_cpu(irq_stat,j).irq_tlb_count);
333 seq_printf(p, " TLB shootdowns\n");
334#endif
335#ifdef CONFIG_X86_MCE
336 seq_printf(p, "TRM: ");
337 for_each_online_cpu(j)
338 seq_printf(p, "%10u ",
339 per_cpu(irq_stat,j).irq_thermal_count);
340 seq_printf(p, " Thermal event interrupts\n");
341#endif
342#ifdef CONFIG_X86_LOCAL_APIC
343 seq_printf(p, "SPU: ");
344 for_each_online_cpu(j)
345 seq_printf(p, "%10u ",
346 per_cpu(irq_stat,j).irq_spurious_count);
347 seq_printf(p, " Spurious interrupts\n");
348#endif
349 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
350#if defined(CONFIG_X86_IO_APIC)
351 seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
352#endif
353 }
354 return 0;
355}
356
357/*
358 * /proc/stat helpers
359 */
360u64 arch_irq_stat_cpu(unsigned int cpu)
361{
362 u64 sum = nmi_count(cpu);
363
364#ifdef CONFIG_X86_LOCAL_APIC
365 sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
366#endif
367#ifdef CONFIG_SMP
368 sum += per_cpu(irq_stat, cpu).irq_resched_count;
369 sum += per_cpu(irq_stat, cpu).irq_call_count;
370 sum += per_cpu(irq_stat, cpu).irq_tlb_count;
371#endif
372#ifdef CONFIG_X86_MCE
373 sum += per_cpu(irq_stat, cpu).irq_thermal_count;
374#endif
375#ifdef CONFIG_X86_LOCAL_APIC
376 sum += per_cpu(irq_stat, cpu).irq_spurious_count;
377#endif
378 return sum;
379}
380
381u64 arch_irq_stat(void)
382{
383 u64 sum = atomic_read(&irq_err_count);
384
385#ifdef CONFIG_X86_IO_APIC
386 sum += atomic_read(&irq_mis_count);
387#endif
388 return sum;
389}
390
391#ifdef CONFIG_HOTPLUG_CPU 233#ifdef CONFIG_HOTPLUG_CPU
392#include <mach_apic.h> 234#include <mach_apic.h>
393 235
@@ -395,20 +237,22 @@ void fixup_irqs(cpumask_t map)
395{ 237{
396 unsigned int irq; 238 unsigned int irq;
397 static int warned; 239 static int warned;
240 struct irq_desc *desc;
398 241
399 for (irq = 0; irq < NR_IRQS; irq++) { 242 for_each_irq_desc(irq, desc) {
400 cpumask_t mask; 243 cpumask_t mask;
244
401 if (irq == 2) 245 if (irq == 2)
402 continue; 246 continue;
403 247
404 cpus_and(mask, irq_desc[irq].affinity, map); 248 cpus_and(mask, desc->affinity, map);
405 if (any_online_cpu(mask) == NR_CPUS) { 249 if (any_online_cpu(mask) == NR_CPUS) {
406 printk("Breaking affinity for irq %i\n", irq); 250 printk("Breaking affinity for irq %i\n", irq);
407 mask = map; 251 mask = map;
408 } 252 }
409 if (irq_desc[irq].chip->set_affinity) 253 if (desc->chip->set_affinity)
410 irq_desc[irq].chip->set_affinity(irq, mask); 254 desc->chip->set_affinity(irq, mask);
411 else if (irq_desc[irq].action && !(warned++)) 255 else if (desc->action && !(warned++))
412 printk("Cannot set affinity for irq %i\n", irq); 256 printk("Cannot set affinity for irq %i\n", irq);
413 } 257 }
414 258
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1f78b238d8d2..60eb84eb77a0 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -18,28 +18,6 @@
18#include <asm/idle.h> 18#include <asm/idle.h>
19#include <asm/smp.h> 19#include <asm/smp.h>
20 20
21atomic_t irq_err_count;
22
23/*
24 * 'what should we do if we get a hw irq event on an illegal vector'.
25 * each architecture has to answer this themselves.
26 */
27void ack_bad_irq(unsigned int irq)
28{
29 printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
30 /*
31 * Currently unexpected vectors happen only on SMP and APIC.
32 * We _must_ ack these because every local APIC has only N
33 * irq slots per priority level, and a 'hanging, unacked' IRQ
34 * holds up an irq slot - in excessive cases (when multiple
35 * unexpected vectors occur) that might lock up the APIC
36 * completely.
37 * But don't ack when the APIC is disabled. -AK
38 */
39 if (!disable_apic)
40 ack_APIC_irq();
41}
42
43#ifdef CONFIG_DEBUG_STACKOVERFLOW 21#ifdef CONFIG_DEBUG_STACKOVERFLOW
44/* 22/*
45 * Probabilistic stack overflow check: 23 * Probabilistic stack overflow check:
@@ -65,122 +43,6 @@ static inline void stack_overflow_check(struct pt_regs *regs)
65#endif 43#endif
66 44
67/* 45/*
68 * Generic, controller-independent functions:
69 */
70
71int show_interrupts(struct seq_file *p, void *v)
72{
73 int i = *(loff_t *) v, j;
74 struct irqaction * action;
75 unsigned long flags;
76
77 if (i == 0) {
78 seq_printf(p, " ");
79 for_each_online_cpu(j)
80 seq_printf(p, "CPU%-8d",j);
81 seq_putc(p, '\n');
82 }
83
84 if (i < NR_IRQS) {
85 unsigned any_count = 0;
86
87 spin_lock_irqsave(&irq_desc[i].lock, flags);
88#ifndef CONFIG_SMP
89 any_count = kstat_irqs(i);
90#else
91 for_each_online_cpu(j)
92 any_count |= kstat_cpu(j).irqs[i];
93#endif
94 action = irq_desc[i].action;
95 if (!action && !any_count)
96 goto skip;
97 seq_printf(p, "%3d: ",i);
98#ifndef CONFIG_SMP
99 seq_printf(p, "%10u ", kstat_irqs(i));
100#else
101 for_each_online_cpu(j)
102 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
103#endif
104 seq_printf(p, " %8s", irq_desc[i].chip->name);
105 seq_printf(p, "-%-8s", irq_desc[i].name);
106
107 if (action) {
108 seq_printf(p, " %s", action->name);
109 while ((action = action->next) != NULL)
110 seq_printf(p, ", %s", action->name);
111 }
112 seq_putc(p, '\n');
113skip:
114 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
115 } else if (i == NR_IRQS) {
116 seq_printf(p, "NMI: ");
117 for_each_online_cpu(j)
118 seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
119 seq_printf(p, " Non-maskable interrupts\n");
120 seq_printf(p, "LOC: ");
121 for_each_online_cpu(j)
122 seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
123 seq_printf(p, " Local timer interrupts\n");
124#ifdef CONFIG_SMP
125 seq_printf(p, "RES: ");
126 for_each_online_cpu(j)
127 seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
128 seq_printf(p, " Rescheduling interrupts\n");
129 seq_printf(p, "CAL: ");
130 for_each_online_cpu(j)
131 seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
132 seq_printf(p, " function call interrupts\n");
133 seq_printf(p, "TLB: ");
134 for_each_online_cpu(j)
135 seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
136 seq_printf(p, " TLB shootdowns\n");
137#endif
138#ifdef CONFIG_X86_MCE
139 seq_printf(p, "TRM: ");
140 for_each_online_cpu(j)
141 seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
142 seq_printf(p, " Thermal event interrupts\n");
143 seq_printf(p, "THR: ");
144 for_each_online_cpu(j)
145 seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
146 seq_printf(p, " Threshold APIC interrupts\n");
147#endif
148 seq_printf(p, "SPU: ");
149 for_each_online_cpu(j)
150 seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
151 seq_printf(p, " Spurious interrupts\n");
152 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
153 }
154 return 0;
155}
156
157/*
158 * /proc/stat helpers
159 */
160u64 arch_irq_stat_cpu(unsigned int cpu)
161{
162 u64 sum = cpu_pda(cpu)->__nmi_count;
163
164 sum += cpu_pda(cpu)->apic_timer_irqs;
165#ifdef CONFIG_SMP
166 sum += cpu_pda(cpu)->irq_resched_count;
167 sum += cpu_pda(cpu)->irq_call_count;
168 sum += cpu_pda(cpu)->irq_tlb_count;
169#endif
170#ifdef CONFIG_X86_MCE
171 sum += cpu_pda(cpu)->irq_thermal_count;
172 sum += cpu_pda(cpu)->irq_threshold_count;
173#endif
174 sum += cpu_pda(cpu)->irq_spurious_count;
175 return sum;
176}
177
178u64 arch_irq_stat(void)
179{
180 return atomic_read(&irq_err_count);
181}
182
183/*
184 * do_IRQ handles all normal device IRQ's (the special 46 * do_IRQ handles all normal device IRQ's (the special
185 * SMP cross-CPU interrupts have their own specific 47 * SMP cross-CPU interrupts have their own specific
186 * handlers). 48 * handlers).
@@ -188,6 +50,7 @@ u64 arch_irq_stat(void)
188asmlinkage unsigned int do_IRQ(struct pt_regs *regs) 50asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
189{ 51{
190 struct pt_regs *old_regs = set_irq_regs(regs); 52 struct pt_regs *old_regs = set_irq_regs(regs);
53 struct irq_desc *desc;
191 54
192 /* high bit used in ret_from_ code */ 55 /* high bit used in ret_from_ code */
193 unsigned vector = ~regs->orig_ax; 56 unsigned vector = ~regs->orig_ax;
@@ -201,8 +64,9 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
201 stack_overflow_check(regs); 64 stack_overflow_check(regs);
202#endif 65#endif
203 66
204 if (likely(irq < NR_IRQS)) 67 desc = irq_to_desc(irq);
205 generic_handle_irq(irq); 68 if (likely(desc))
69 generic_handle_irq_desc(irq, desc);
206 else { 70 else {
207 if (!disable_apic) 71 if (!disable_apic)
208 ack_APIC_irq(); 72 ack_APIC_irq();
@@ -223,8 +87,9 @@ void fixup_irqs(cpumask_t map)
223{ 87{
224 unsigned int irq; 88 unsigned int irq;
225 static int warned; 89 static int warned;
90 struct irq_desc *desc;
226 91
227 for (irq = 0; irq < NR_IRQS; irq++) { 92 for_each_irq_desc(irq, desc) {
228 cpumask_t mask; 93 cpumask_t mask;
229 int break_affinity = 0; 94 int break_affinity = 0;
230 int set_affinity = 1; 95 int set_affinity = 1;
@@ -233,32 +98,32 @@ void fixup_irqs(cpumask_t map)
233 continue; 98 continue;
234 99
235 /* interrupts are disabled at this point */ 100 /* interrupts are disabled at this point */
236 spin_lock(&irq_desc[irq].lock); 101 spin_lock(&desc->lock);
237 102
238 if (!irq_has_action(irq) || 103 if (!irq_has_action(irq) ||
239 cpus_equal(irq_desc[irq].affinity, map)) { 104 cpus_equal(desc->affinity, map)) {
240 spin_unlock(&irq_desc[irq].lock); 105 spin_unlock(&desc->lock);
241 continue; 106 continue;
242 } 107 }
243 108
244 cpus_and(mask, irq_desc[irq].affinity, map); 109 cpus_and(mask, desc->affinity, map);
245 if (cpus_empty(mask)) { 110 if (cpus_empty(mask)) {
246 break_affinity = 1; 111 break_affinity = 1;
247 mask = map; 112 mask = map;
248 } 113 }
249 114
250 if (irq_desc[irq].chip->mask) 115 if (desc->chip->mask)
251 irq_desc[irq].chip->mask(irq); 116 desc->chip->mask(irq);
252 117
253 if (irq_desc[irq].chip->set_affinity) 118 if (desc->chip->set_affinity)
254 irq_desc[irq].chip->set_affinity(irq, mask); 119 desc->chip->set_affinity(irq, mask);
255 else if (!(warned++)) 120 else if (!(warned++))
256 set_affinity = 0; 121 set_affinity = 0;
257 122
258 if (irq_desc[irq].chip->unmask) 123 if (desc->chip->unmask)
259 irq_desc[irq].chip->unmask(irq); 124 desc->chip->unmask(irq);
260 125
261 spin_unlock(&irq_desc[irq].lock); 126 spin_unlock(&desc->lock);
262 127
263 if (break_affinity && set_affinity) 128 if (break_affinity && set_affinity)
264 printk("Broke affinity for irq %i\n", irq); 129 printk("Broke affinity for irq %i\n", irq);
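In the 64-bit do_IRQ() hunk above, the vector recovered from ~regs->orig_ax is translated to an irq number through the per-CPU vector_irq table before the descriptor lookup; that translation sits just outside the lines shown. An approximate sketch of the whole path (an approximation only; the surrounding code is not part of this hunk):

	unsigned vector = ~regs->orig_ax;	/* entry code stores ~vector */
	unsigned irq = __get_cpu_var(vector_irq)[vector];
	struct irq_desc *desc = irq_to_desc(irq);

	if (likely(desc))
		generic_handle_irq_desc(irq, desc);
	else if (!disable_apic)
		ack_APIC_irq();		/* unexpected vector: ack and drop */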
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index d66914287ee1..845aa9803e80 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -69,11 +69,48 @@ void __init init_ISA_irqs (void)
69 * 16 old-style INTA-cycle interrupts: 69 * 16 old-style INTA-cycle interrupts:
70 */ 70 */
71 for (i = 0; i < 16; i++) { 71 for (i = 0; i < 16; i++) {
72 /* first use of this irq_desc */
73 struct irq_desc *desc = irq_to_desc(i);
74
75 desc->status = IRQ_DISABLED;
76 desc->action = NULL;
77 desc->depth = 1;
78
72 set_irq_chip_and_handler_name(i, &i8259A_chip, 79 set_irq_chip_and_handler_name(i, &i8259A_chip,
73 handle_level_irq, "XT"); 80 handle_level_irq, "XT");
74 } 81 }
75} 82}
76 83
84/*
85 * IRQ2 is cascade interrupt to second interrupt controller
86 */
87static struct irqaction irq2 = {
88 .handler = no_action,
89 .mask = CPU_MASK_NONE,
90 .name = "cascade",
91};
92
93DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
94 [0 ... IRQ0_VECTOR - 1] = -1,
95 [IRQ0_VECTOR] = 0,
96 [IRQ1_VECTOR] = 1,
97 [IRQ2_VECTOR] = 2,
98 [IRQ3_VECTOR] = 3,
99 [IRQ4_VECTOR] = 4,
100 [IRQ5_VECTOR] = 5,
101 [IRQ6_VECTOR] = 6,
102 [IRQ7_VECTOR] = 7,
103 [IRQ8_VECTOR] = 8,
104 [IRQ9_VECTOR] = 9,
105 [IRQ10_VECTOR] = 10,
106 [IRQ11_VECTOR] = 11,
107 [IRQ12_VECTOR] = 12,
108 [IRQ13_VECTOR] = 13,
109 [IRQ14_VECTOR] = 14,
110 [IRQ15_VECTOR] = 15,
111 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
112};
113
77/* Overridden in paravirt.c */ 114/* Overridden in paravirt.c */
78void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); 115void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
79 116
@@ -89,15 +126,50 @@ void __init native_init_IRQ(void)
89 * us. (some of these will be overridden and become 126 * us. (some of these will be overridden and become
90 * 'special' SMP interrupts) 127 * 'special' SMP interrupts)
91 */ 128 */
92 for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) { 129 for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
93 int vector = FIRST_EXTERNAL_VECTOR + i;
94 if (i >= NR_IRQS)
95 break;
96 /* SYSCALL_VECTOR was reserved in trap_init. */ 130 /* SYSCALL_VECTOR was reserved in trap_init. */
97 if (!test_bit(vector, used_vectors)) 131 if (i != SYSCALL_VECTOR)
98 set_intr_gate(vector, interrupt[i]); 132 set_intr_gate(i, interrupt[i]);
99 } 133 }
100 134
135
136#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
137 /*
138 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
139 * IPI, driven by wakeup.
140 */
141 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
142
143 /* IPI for invalidation */
144 alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
145
146 /* IPI for generic function call */
147 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
148
149 /* IPI for single call function */
150 set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt);
151
152 /* Low priority IPI to cleanup after moving an irq */
153 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
154#endif
155
156#ifdef CONFIG_X86_LOCAL_APIC
157 /* self generated IPI for local APIC timer */
158 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
159
160 /* IPI vectors for APIC spurious and error interrupts */
161 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
162 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
163#endif
164
165#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
166 /* thermal monitor LVT interrupt */
167 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
168#endif
169
170 if (!acpi_ioapic)
171 setup_irq(2, &irq2);
172
101 /* setup after call gates are initialised (usually add in 173 /* setup after call gates are initialised (usually add in
102 * the architecture specific gates) 174 * the architecture specific gates)
103 */ 175 */
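The vector_irq initializer added to irqinit_32.c above relies on GNU C range designators ([first ... last] = value): every vector defaults to -1 (unassigned) except the sixteen ISA vectors. A standalone example of the syntax, with made-up values:

	static int demo_map[16] = {
		[0 ... 7]   = -1,	/* range designator: slots 0-7 */
		[8]         = 100,
		[9 ... 15]  = -1,
	};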
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 1f26fd9ec4f4..ff0235391285 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -135,51 +135,33 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
135 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 135 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
136}; 136};
137 137
138static void __init init_ISA_irqs (void) 138void __init init_ISA_irqs(void)
139{ 139{
140 int i; 140 int i;
141 141
142 init_bsp_APIC(); 142 init_bsp_APIC();
143 init_8259A(0); 143 init_8259A(0);
144 144
145 for (i = 0; i < NR_IRQS; i++) { 145 for (i = 0; i < 16; i++) {
146 irq_desc[i].status = IRQ_DISABLED; 146 /* first use of this irq_desc */
147 irq_desc[i].action = NULL; 147 struct irq_desc *desc = irq_to_desc(i);
148 irq_desc[i].depth = 1;
149 148
150 if (i < 16) { 149 desc->status = IRQ_DISABLED;
151 /* 150 desc->action = NULL;
152 * 16 old-style INTA-cycle interrupts: 151 desc->depth = 1;
153 */ 152
154 set_irq_chip_and_handler_name(i, &i8259A_chip, 153 /*
154 * 16 old-style INTA-cycle interrupts:
155 */
156 set_irq_chip_and_handler_name(i, &i8259A_chip,
155 handle_level_irq, "XT"); 157 handle_level_irq, "XT");
156 } else {
157 /*
158 * 'high' PCI IRQs filled in on demand
159 */
160 irq_desc[i].chip = &no_irq_chip;
161 }
162 } 158 }
163} 159}
164 160
165void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); 161void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
166 162
167void __init native_init_IRQ(void) 163static void __init smp_intr_init(void)
168{ 164{
169 int i;
170
171 init_ISA_irqs();
172 /*
173 * Cover the whole vector space, no vector can escape
174 * us. (some of these will be overridden and become
175 * 'special' SMP interrupts)
176 */
177 for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
178 int vector = FIRST_EXTERNAL_VECTOR + i;
179 if (vector != IA32_SYSCALL_VECTOR)
180 set_intr_gate(vector, interrupt[i]);
181 }
182
183#ifdef CONFIG_SMP 165#ifdef CONFIG_SMP
184 /* 166 /*
185 * The reschedule interrupt is a CPU-to-CPU reschedule-helper 167 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
@@ -207,6 +189,12 @@ void __init native_init_IRQ(void)
207 /* Low priority IPI to cleanup after moving an irq */ 189 /* Low priority IPI to cleanup after moving an irq */
208 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); 190 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
209#endif 191#endif
192}
193
194static void __init apic_intr_init(void)
195{
196 smp_intr_init();
197
210 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); 198 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
211 alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); 199 alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
212 200
@@ -216,6 +204,25 @@ void __init native_init_IRQ(void)
216 /* IPI vectors for APIC spurious and error interrupts */ 204 /* IPI vectors for APIC spurious and error interrupts */
217 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); 205 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
218 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); 206 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
207}
208
209void __init native_init_IRQ(void)
210{
211 int i;
212
213 init_ISA_irqs();
214 /*
215 * Cover the whole vector space, no vector can escape
216 * us. (some of these will be overridden and become
217 * 'special' SMP interrupts)
218 */
219 for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
220 int vector = FIRST_EXTERNAL_VECTOR + i;
221 if (vector != IA32_SYSCALL_VECTOR)
222 set_intr_gate(vector, interrupt[i]);
223 }
224
225 apic_intr_init();
219 226
220 if (!acpi_ioapic) 227 if (!acpi_ioapic)
221 setup_irq(2, &irq2); 228 setup_irq(2, &irq2);
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index 7377ccb21335..304d8bad6559 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -16,8 +16,9 @@ EXPORT_SYMBOL(num_k8_northbridges);
16static u32 *flush_words; 16static u32 *flush_words;
17 17
18struct pci_device_id k8_nb_ids[] = { 18struct pci_device_id k8_nb_ids[] = {
19 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, 19 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, 20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
21 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
21 {} 22 {}
22}; 23};
23EXPORT_SYMBOL(k8_nb_ids); 24EXPORT_SYMBOL(k8_nb_ids);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 8b7a3cf37d2b..478bca986eca 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -178,7 +178,7 @@ static void kvm_flush_tlb(void)
178 kvm_deferred_mmu_op(&ftlb, sizeof ftlb); 178 kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
179} 179}
180 180
181static void kvm_release_pt(u32 pfn) 181static void kvm_release_pt(unsigned long pfn)
182{ 182{
183 struct kvm_mmu_op_release_pt rpt = { 183 struct kvm_mmu_op_release_pt rpt = {
184 .header.op = KVM_MMU_OP_RELEASE_PT, 184 .header.op = KVM_MMU_OP_RELEASE_PT,
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d02def06ca91..774ac4991568 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -78,6 +78,34 @@ static cycle_t kvm_clock_read(void)
78 return ret; 78 return ret;
79} 79}
80 80
81/*
82 * If we don't preset lpj, the guest may calibrate its delay loop
83 * under heavy load - thus getting a lower lpj - and later execute
84 * those delays without load, making every delay loop finish
85 * sooner than intended.
86 * Any heuristic is bound to fail, because ultimately a large pool
87 * of guests can be running and disturbing each other. So we preset
88 * lpj here.
89 */
90static unsigned long kvm_get_tsc_khz(void)
91{
92 return preset_lpj;
93}
94
95static void kvm_get_preset_lpj(void)
96{
97 struct pvclock_vcpu_time_info *src;
98 unsigned long khz;
99 u64 lpj;
100
101 src = &per_cpu(hv_clock, 0);
102 khz = pvclock_tsc_khz(src);
103
104 lpj = ((u64)khz * 1000);
105 do_div(lpj, HZ);
106 preset_lpj = lpj;
107}
108
81static struct clocksource kvm_clock = { 109static struct clocksource kvm_clock = {
82 .name = "kvm-clock", 110 .name = "kvm-clock",
83 .read = kvm_clock_read, 111 .read = kvm_clock_read,
@@ -153,6 +181,7 @@ void __init kvmclock_init(void)
153 pv_time_ops.get_wallclock = kvm_get_wallclock; 181 pv_time_ops.get_wallclock = kvm_get_wallclock;
154 pv_time_ops.set_wallclock = kvm_set_wallclock; 182 pv_time_ops.set_wallclock = kvm_set_wallclock;
155 pv_time_ops.sched_clock = kvm_clock_read; 183 pv_time_ops.sched_clock = kvm_clock_read;
184 pv_time_ops.get_tsc_khz = kvm_get_tsc_khz;
156#ifdef CONFIG_X86_LOCAL_APIC 185#ifdef CONFIG_X86_LOCAL_APIC
157 pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock; 186 pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
158#endif 187#endif
@@ -163,6 +192,7 @@ void __init kvmclock_init(void)
163#ifdef CONFIG_KEXEC 192#ifdef CONFIG_KEXEC
164 machine_ops.crash_shutdown = kvm_crash_shutdown; 193 machine_ops.crash_shutdown = kvm_crash_shutdown;
165#endif 194#endif
195 kvm_get_preset_lpj();
166 clocksource_register(&kvm_clock); 196 clocksource_register(&kvm_clock);
167 } 197 }
168} 198}
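The preset-lpj arithmetic added above is lpj = khz * 1000 / HZ. A worked example with illustrative numbers (not taken from the source): a 2.0 GHz paravirt clock reports khz = 2000000, so with HZ = 250 the guest is preset to 8,000,000 loops per jiffy regardless of the load at calibration time:

	u64 lpj = (u64)2000000 * 1000;	/* khz * 1000 */
	do_div(lpj, 250);		/* do_div() divides in place; lpj == 8000000 */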
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index b68e21f06f4f..eee32b43fee3 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -18,6 +18,7 @@
18#include <asm/ldt.h> 18#include <asm/ldt.h>
19#include <asm/desc.h> 19#include <asm/desc.h>
20#include <asm/mmu_context.h> 20#include <asm/mmu_context.h>
21#include <asm/syscalls.h>
21 22
22#ifdef CONFIG_SMP 23#ifdef CONFIG_SMP
23static void flush_ldt(void *current_mm) 24static void flush_ldt(void *current_mm)
@@ -51,6 +52,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
51 memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, 52 memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
52 (mincount - oldsize) * LDT_ENTRY_SIZE); 53 (mincount - oldsize) * LDT_ENTRY_SIZE);
53 54
55 paravirt_alloc_ldt(newldt, mincount);
56
54#ifdef CONFIG_X86_64 57#ifdef CONFIG_X86_64
55 /* CHECKME: Do we really need this ? */ 58 /* CHECKME: Do we really need this ? */
56 wmb(); 59 wmb();
@@ -73,6 +76,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
73#endif 76#endif
74 } 77 }
75 if (oldsize) { 78 if (oldsize) {
79 paravirt_free_ldt(oldldt, oldsize);
76 if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE) 80 if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
77 vfree(oldldt); 81 vfree(oldldt);
78 else 82 else
@@ -84,10 +88,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
84static inline int copy_ldt(mm_context_t *new, mm_context_t *old) 88static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
85{ 89{
86 int err = alloc_ldt(new, old->size, 0); 90 int err = alloc_ldt(new, old->size, 0);
91 int i;
87 92
88 if (err < 0) 93 if (err < 0)
89 return err; 94 return err;
90 memcpy(new->ldt, old->ldt, old->size * LDT_ENTRY_SIZE); 95
96 for (i = 0; i < old->size; i++)
97 write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
91 return 0; 98 return 0;
92} 99}
93 100
@@ -124,6 +131,7 @@ void destroy_context(struct mm_struct *mm)
124 if (mm == current->active_mm) 131 if (mm == current->active_mm)
125 clear_LDT(); 132 clear_LDT();
126#endif 133#endif
134 paravirt_free_ldt(mm->context.ldt, mm->context.size);
127 if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE) 135 if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
128 vfree(mm->context.ldt); 136 vfree(mm->context.ldt);
129 else 137 else
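The copy_ldt() change above is a paravirt correctness fix: each descriptor write must go through write_ldt_entry() so a hypervisor can trap and validate the entry, which a raw memcpy() into the new LDT would bypass. Restated as a standalone sketch, one pv-aware write per LDT slot:

	for (i = 0; i < old->size; i++)
		write_ldt_entry(new->ldt, i,
				old->ldt + i * LDT_ENTRY_SIZE);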
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
deleted file mode 100644
index 652fa5c38ebe..000000000000
--- a/arch/x86/kernel/microcode.c
+++ /dev/null
@@ -1,853 +0,0 @@
1/*
2 * Intel CPU Microcode Update Driver for Linux
3 *
4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 *
7 * This driver allows updating microcode on Intel processors
8 * belonging to the IA-32 family: PentiumPro, Pentium II,
9 * Pentium III, Xeon, Pentium 4, etc.
10 *
11 * Reference: Section 8.11 of Volume 3a, IA-32 Intel® Architecture
12 * Software Developer's Manual
13 * Order Number 253668 or free download from:
14 *
15 * http://developer.intel.com/design/pentium4/manuals/253668.htm
16 *
17 * For more information, go to http://www.urbanmyth.org/microcode
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 * 1.0 16 Feb 2000, Tigran Aivazian <tigran@sco.com>
25 * Initial release.
26 * 1.01 18 Feb 2000, Tigran Aivazian <tigran@sco.com>
27 * Added read() support + cleanups.
28 * 1.02 21 Feb 2000, Tigran Aivazian <tigran@sco.com>
29 * Added 'device trimming' support. open(O_WRONLY) zeroes
30 * and frees the saved copy of applied microcode.
31 * 1.03 29 Feb 2000, Tigran Aivazian <tigran@sco.com>
32 * Made to use devfs (/dev/cpu/microcode) + cleanups.
33 * 1.04 06 Jun 2000, Simon Trimmer <simon@veritas.com>
34 * Added misc device support (now uses both devfs and misc).
35 * Added MICROCODE_IOCFREE ioctl to clear memory.
36 * 1.05 09 Jun 2000, Simon Trimmer <simon@veritas.com>
37 * Messages for error cases (non Intel & no suitable microcode).
38 * 1.06 03 Aug 2000, Tigran Aivazian <tigran@veritas.com>
39 * Removed ->release(). Removed exclusive open and status bitmap.
40 * Added microcode_rwsem to serialize read()/write()/ioctl().
41 * Removed global kernel lock usage.
42 * 1.07 07 Sep 2000, Tigran Aivazian <tigran@veritas.com>
43 * Write 0 to 0x8B msr and then cpuid before reading revision,
44 * so that it works even if there were no update done by the
45 * BIOS. Otherwise, reading from 0x8B gives junk (which happened
46 * to be 0 on my machine which is why it worked even when I
47 * disabled update by the BIOS)
48 * Thanks to Eric W. Biederman <ebiederman@lnxi.com> for the fix.
49 * 1.08 11 Dec 2000, Richard Schaal <richard.schaal@intel.com> and
50 * Tigran Aivazian <tigran@veritas.com>
51 * Intel Pentium 4 processor support and bugfixes.
52 * 1.09 30 Oct 2001, Tigran Aivazian <tigran@veritas.com>
53 * Bugfix for HT (Hyper-Threading) enabled processors
54 * whereby processor resources are shared by all logical processors
55 * in a single CPU package.
56 * 1.10 28 Feb 2002 Asit K Mallick <asit.k.mallick@intel.com> and
57 * Tigran Aivazian <tigran@veritas.com>,
58 * Serialize updates as required on HT processors due to speculative
59 * nature of implementation.
60 * 1.11 22 Mar 2002 Tigran Aivazian <tigran@veritas.com>
61 * Fix the panic when writing zero-length microcode chunk.
62 * 1.12 29 Sep 2003 Nitin Kamble <nitin.a.kamble@intel.com>,
63 * Jun Nakajima <jun.nakajima@intel.com>
64 * Support for the microcode updates in the new format.
65 * 1.13 10 Oct 2003 Tigran Aivazian <tigran@veritas.com>
66 * Removed ->read() method and obsoleted MICROCODE_IOCFREE ioctl
67 * because we no longer hold a copy of applied microcode
68 * in kernel memory.
69 * 1.14 25 Jun 2004 Tigran Aivazian <tigran@veritas.com>
70 * Fix sigmatch() macro to handle old CPUs with pf == 0.
71 * Thanks to Stuart Swales for pointing out this bug.
72 */
73
74//#define DEBUG /* pr_debug */
75#include <linux/capability.h>
76#include <linux/kernel.h>
77#include <linux/init.h>
78#include <linux/sched.h>
79#include <linux/smp_lock.h>
80#include <linux/cpumask.h>
81#include <linux/module.h>
82#include <linux/slab.h>
83#include <linux/vmalloc.h>
84#include <linux/miscdevice.h>
85#include <linux/spinlock.h>
86#include <linux/mm.h>
87#include <linux/fs.h>
88#include <linux/mutex.h>
89#include <linux/cpu.h>
90#include <linux/firmware.h>
91#include <linux/platform_device.h>
92
93#include <asm/msr.h>
94#include <asm/uaccess.h>
95#include <asm/processor.h>
96
97MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
98MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
99MODULE_LICENSE("GPL");
100
101#define MICROCODE_VERSION "1.14a"
102
103#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
104#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
105#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
106#define EXT_HEADER_SIZE (sizeof (struct extended_sigtable)) /* 20 bytes */
107#define EXT_SIGNATURE_SIZE (sizeof (struct extended_signature)) /* 12 bytes */
108#define DWSIZE (sizeof (u32))
109#define get_totalsize(mc) \
110 (((microcode_t *)mc)->hdr.totalsize ? \
111 ((microcode_t *)mc)->hdr.totalsize : DEFAULT_UCODE_TOTALSIZE)
112#define get_datasize(mc) \
113 (((microcode_t *)mc)->hdr.datasize ? \
114 ((microcode_t *)mc)->hdr.datasize : DEFAULT_UCODE_DATASIZE)
115
116#define sigmatch(s1, s2, p1, p2) \
117 (((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0))))
118
119#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
120
121/* serialize access to the physical write to MSR 0x79 */
122static DEFINE_SPINLOCK(microcode_update_lock);
123
124/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
125static DEFINE_MUTEX(microcode_mutex);
126
127static struct ucode_cpu_info {
128 int valid;
129 unsigned int sig;
130 unsigned int pf;
131 unsigned int rev;
132 microcode_t *mc;
133} ucode_cpu_info[NR_CPUS];
134
135static void collect_cpu_info(int cpu_num)
136{
137 struct cpuinfo_x86 *c = &cpu_data(cpu_num);
138 struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
139 unsigned int val[2];
140
141 /* We should bind the task to the CPU */
142 BUG_ON(raw_smp_processor_id() != cpu_num);
143 uci->pf = uci->rev = 0;
144 uci->mc = NULL;
145 uci->valid = 1;
146
147 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
148 cpu_has(c, X86_FEATURE_IA64)) {
149 printk(KERN_ERR "microcode: CPU%d not a capable Intel "
150 "processor\n", cpu_num);
151 uci->valid = 0;
152 return;
153 }
154
155 uci->sig = cpuid_eax(0x00000001);
156
157 if ((c->x86_model >= 5) || (c->x86 > 6)) {
158 /* get processor flags from MSR 0x17 */
159 rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
160 uci->pf = 1 << ((val[1] >> 18) & 7);
161 }
162
163 wrmsr(MSR_IA32_UCODE_REV, 0, 0);
164 /* see notes above for revision 1.07. Apparent chip bug */
165 sync_core();
166 /* get the current revision from MSR 0x8B */
167 rdmsr(MSR_IA32_UCODE_REV, val[0], uci->rev);
168 pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
169 uci->sig, uci->pf, uci->rev);
170}
171
172static inline int microcode_update_match(int cpu_num,
173 microcode_header_t *mc_header, int sig, int pf)
174{
175 struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
176
177 if (!sigmatch(sig, uci->sig, pf, uci->pf)
178 || mc_header->rev <= uci->rev)
179 return 0;
180 return 1;
181}
182
183static int microcode_sanity_check(void *mc)
184{
185 microcode_header_t *mc_header = mc;
186 struct extended_sigtable *ext_header = NULL;
187 struct extended_signature *ext_sig;
188 unsigned long total_size, data_size, ext_table_size;
189 int sum, orig_sum, ext_sigcount = 0, i;
190
191 total_size = get_totalsize(mc_header);
192 data_size = get_datasize(mc_header);
193 if (data_size + MC_HEADER_SIZE > total_size) {
194 printk(KERN_ERR "microcode: error! "
195 "Bad data size in microcode data file\n");
196 return -EINVAL;
197 }
198
199 if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
200 printk(KERN_ERR "microcode: error! "
201 "Unknown microcode update format\n");
202 return -EINVAL;
203 }
204 ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
205 if (ext_table_size) {
206 if ((ext_table_size < EXT_HEADER_SIZE)
207 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
208 printk(KERN_ERR "microcode: error! "
209 "Small exttable size in microcode data file\n");
210 return -EINVAL;
211 }
212 ext_header = mc + MC_HEADER_SIZE + data_size;
213 if (ext_table_size != exttable_size(ext_header)) {
214 printk(KERN_ERR "microcode: error! "
215 "Bad exttable size in microcode data file\n");
216 return -EFAULT;
217 }
218 ext_sigcount = ext_header->count;
219 }
220
221 /* check extended table checksum */
222 if (ext_table_size) {
223 int ext_table_sum = 0;
224 int *ext_tablep = (int *)ext_header;
225
226 i = ext_table_size / DWSIZE;
227 while (i--)
228 ext_table_sum += ext_tablep[i];
229 if (ext_table_sum) {
230 printk(KERN_WARNING "microcode: aborting, "
231 "bad extended signature table checksum\n");
232 return -EINVAL;
233 }
234 }
235
236 /* calculate the checksum */
237 orig_sum = 0;
238 i = (MC_HEADER_SIZE + data_size) / DWSIZE;
239 while (i--)
240 orig_sum += ((int *)mc)[i];
241 if (orig_sum) {
242 printk(KERN_ERR "microcode: aborting, bad checksum\n");
243 return -EINVAL;
244 }
245 if (!ext_table_size)
246 return 0;
247 /* check extended signature checksum */
248 for (i = 0; i < ext_sigcount; i++) {
249 ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
250 EXT_SIGNATURE_SIZE * i;
251 sum = orig_sum
252 - (mc_header->sig + mc_header->pf + mc_header->cksum)
253 + (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
254 if (sum) {
255 printk(KERN_ERR "microcode: aborting, bad checksum\n");
256 return -EINVAL;
257 }
258 }
259 return 0;
260}
261
262/*
263 * return 0 - no update found
264 * return 1 - found update
265 * return < 0 - error
266 */
267static int get_maching_microcode(void *mc, int cpu)
268{
269 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
270 microcode_header_t *mc_header = mc;
271 struct extended_sigtable *ext_header;
272 unsigned long total_size = get_totalsize(mc_header);
273 int ext_sigcount, i;
274 struct extended_signature *ext_sig;
275 void *new_mc;
276
277 if (microcode_update_match(cpu, mc_header,
278 mc_header->sig, mc_header->pf))
279 goto find;
280
281 if (total_size <= get_datasize(mc_header) + MC_HEADER_SIZE)
282 return 0;
283
284 ext_header = mc + get_datasize(mc_header) + MC_HEADER_SIZE;
285 ext_sigcount = ext_header->count;
286 ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
287 for (i = 0; i < ext_sigcount; i++) {
288 if (microcode_update_match(cpu, mc_header,
289 ext_sig->sig, ext_sig->pf))
290 goto find;
291 ext_sig++;
292 }
293 return 0;
294find:
295 pr_debug("microcode: CPU%d found a matching microcode update with"
296 " version 0x%x (current=0x%x)\n", cpu, mc_header->rev,uci->rev);
297 new_mc = vmalloc(total_size);
298 if (!new_mc) {
299 printk(KERN_ERR "microcode: error! Can not allocate memory\n");
300 return -ENOMEM;
301 }
302
303 /* free previous update file */
304 vfree(uci->mc);
305
306 memcpy(new_mc, mc, total_size);
307 uci->mc = new_mc;
308 return 1;
309}
310
311static void apply_microcode(int cpu)
312{
313 unsigned long flags;
314 unsigned int val[2];
315 int cpu_num = raw_smp_processor_id();
316 struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
317
318 /* We should bind the task to the CPU */
319 BUG_ON(cpu_num != cpu);
320
321 if (uci->mc == NULL)
322 return;
323
324 /* serialize access to the physical write to MSR 0x79 */
325 spin_lock_irqsave(&microcode_update_lock, flags);
326
327 /* write microcode via MSR 0x79 */
328 wrmsr(MSR_IA32_UCODE_WRITE,
329 (unsigned long) uci->mc->bits,
330 (unsigned long) uci->mc->bits >> 16 >> 16);
331 wrmsr(MSR_IA32_UCODE_REV, 0, 0);
332
333 /* see notes above for revision 1.07. Apparent chip bug */
334 sync_core();
335
336 /* get the current revision from MSR 0x8B */
337 rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
338
339 spin_unlock_irqrestore(&microcode_update_lock, flags);
340 if (val[1] != uci->mc->hdr.rev) {
341 printk(KERN_ERR "microcode: CPU%d update from revision "
342 "0x%x to 0x%x failed\n", cpu_num, uci->rev, val[1]);
343 return;
344 }
345 printk(KERN_INFO "microcode: CPU%d updated from revision "
346 "0x%x to 0x%x, date = %08x \n",
347 cpu_num, uci->rev, val[1], uci->mc->hdr.date);
348 uci->rev = val[1];
349}
350
351#ifdef CONFIG_MICROCODE_OLD_INTERFACE
352static void __user *user_buffer; /* user area microcode data buffer */
353static unsigned int user_buffer_size; /* its size */
354
355static long get_next_ucode(void **mc, long offset)
356{
357 microcode_header_t mc_header;
358 unsigned long total_size;
359
360 /* No more data */
361 if (offset >= user_buffer_size)
362 return 0;
363 if (copy_from_user(&mc_header, user_buffer + offset, MC_HEADER_SIZE)) {
364 printk(KERN_ERR "microcode: error! Can not read user data\n");
365 return -EFAULT;
366 }
367 total_size = get_totalsize(&mc_header);
368 if (offset + total_size > user_buffer_size) {
369 printk(KERN_ERR "microcode: error! Bad total size in microcode "
370 "data file\n");
371 return -EINVAL;
372 }
373 *mc = vmalloc(total_size);
374 if (!*mc)
375 return -ENOMEM;
376 if (copy_from_user(*mc, user_buffer + offset, total_size)) {
377 printk(KERN_ERR "microcode: error! Can not read user data\n");
378 vfree(*mc);
379 return -EFAULT;
380 }
381 return offset + total_size;
382}
383
384static int do_microcode_update (void)
385{
386 long cursor = 0;
387 int error = 0;
388 void *new_mc = NULL;
389 int cpu;
390 cpumask_t old;
391
392 old = current->cpus_allowed;
393
394 while ((cursor = get_next_ucode(&new_mc, cursor)) > 0) {
395 error = microcode_sanity_check(new_mc);
396 if (error)
397 goto out;
398 /*
399 * It's possible the data file has multiple matching ucode,
400 * let's keep searching for the latest version
401 */
402 for_each_online_cpu(cpu) {
403 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
404
405 if (!uci->valid)
406 continue;
407 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
408 error = get_maching_microcode(new_mc, cpu);
409 if (error < 0)
410 goto out;
411 if (error == 1)
412 apply_microcode(cpu);
413 }
414 vfree(new_mc);
415 }
416out:
417 if (cursor > 0)
418 vfree(new_mc);
419 if (cursor < 0)
420 error = cursor;
421 set_cpus_allowed_ptr(current, &old);
422 return error;
423}
424
425static int microcode_open (struct inode *unused1, struct file *unused2)
426{
427 cycle_kernel_lock();
428 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
429}
430
431static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
432{
433 ssize_t ret;
434
435 if ((len >> PAGE_SHIFT) > num_physpages) {
436 printk(KERN_ERR "microcode: too much data (max %ld pages)\n", num_physpages);
437 return -EINVAL;
438 }
439
440 get_online_cpus();
441 mutex_lock(&microcode_mutex);
442
443 user_buffer = (void __user *) buf;
444 user_buffer_size = (int) len;
445
446 ret = do_microcode_update();
447 if (!ret)
448 ret = (ssize_t)len;
449
450 mutex_unlock(&microcode_mutex);
451 put_online_cpus();
452
453 return ret;
454}
455
456static const struct file_operations microcode_fops = {
457 .owner = THIS_MODULE,
458 .write = microcode_write,
459 .open = microcode_open,
460};
461
462static struct miscdevice microcode_dev = {
463 .minor = MICROCODE_MINOR,
464 .name = "microcode",
465 .fops = &microcode_fops,
466};
467
468static int __init microcode_dev_init (void)
469{
470 int error;
471
472 error = misc_register(&microcode_dev);
473 if (error) {
474 printk(KERN_ERR
475 "microcode: can't misc_register on minor=%d\n",
476 MICROCODE_MINOR);
477 return error;
478 }
479
480 return 0;
481}
482
483static void microcode_dev_exit (void)
484{
485 misc_deregister(&microcode_dev);
486}
487
488MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
489#else
490#define microcode_dev_init() 0
491#define microcode_dev_exit() do { } while(0)
492#endif
493
494static long get_next_ucode_from_buffer(void **mc, const u8 *buf,
495 unsigned long size, long offset)
496{
497 microcode_header_t *mc_header;
498 unsigned long total_size;
499
500 /* No more data */
501 if (offset >= size)
502 return 0;
503 mc_header = (microcode_header_t *)(buf + offset);
504 total_size = get_totalsize(mc_header);
505
506 if (offset + total_size > size) {
507 printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
508 return -EINVAL;
509 }
510
511 *mc = vmalloc(total_size);
512 if (!*mc) {
513 printk(KERN_ERR "microcode: error! Can not allocate memory\n");
514 return -ENOMEM;
515 }
516 memcpy(*mc, buf + offset, total_size);
517 return offset + total_size;
518}
519
520/* fake device for request_firmware */
521static struct platform_device *microcode_pdev;
522
523static int cpu_request_microcode(int cpu)
524{
525 char name[30];
526 struct cpuinfo_x86 *c = &cpu_data(cpu);
527 const struct firmware *firmware;
528 const u8 *buf;
529 unsigned long size;
530 long offset = 0;
531 int error;
532 void *mc;
533
534 /* We should bind the task to the CPU */
535 BUG_ON(cpu != raw_smp_processor_id());
536 sprintf(name,"intel-ucode/%02x-%02x-%02x",
537 c->x86, c->x86_model, c->x86_mask);
538 error = request_firmware(&firmware, name, &microcode_pdev->dev);
539 if (error) {
540 pr_debug("microcode: data file %s load failed\n", name);
541 return error;
542 }
543 buf = firmware->data;
544 size = firmware->size;
545 while ((offset = get_next_ucode_from_buffer(&mc, buf, size, offset))
546 > 0) {
547 error = microcode_sanity_check(mc);
548 if (error)
549 break;
550 error = get_maching_microcode(mc, cpu);
551 if (error < 0)
552 break;
553 /*
554 * It's possible the data file has multiple matching ucode,
555 * let's keep searching for the latest version
556 */
557 if (error == 1) {
558 apply_microcode(cpu);
559 error = 0;
560 }
561 vfree(mc);
562 }
563 if (offset > 0)
564 vfree(mc);
565 if (offset < 0)
566 error = offset;
567 release_firmware(firmware);
568
569 return error;
570}
571
572static int apply_microcode_check_cpu(int cpu)
573{
574 struct cpuinfo_x86 *c = &cpu_data(cpu);
575 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
576 cpumask_t old;
577 unsigned int val[2];
578 int err = 0;
579
580 /* Check if the microcode is available */
581 if (!uci->mc)
582 return 0;
583
584 old = current->cpus_allowed;
585 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
586
587 /* Check if the microcode we have in memory matches the CPU */
588 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
589 cpu_has(c, X86_FEATURE_IA64) || uci->sig != cpuid_eax(0x00000001))
590 err = -EINVAL;
591
592 if (!err && ((c->x86_model >= 5) || (c->x86 > 6))) {
593 /* get processor flags from MSR 0x17 */
594 rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
595 if (uci->pf != (1 << ((val[1] >> 18) & 7)))
596 err = -EINVAL;
597 }
598
599 if (!err) {
600 wrmsr(MSR_IA32_UCODE_REV, 0, 0);
601 /* see notes above for revision 1.07. Apparent chip bug */
602 sync_core();
603 /* get the current revision from MSR 0x8B */
604 rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
605 if (uci->rev != val[1])
606 err = -EINVAL;
607 }
608
609 if (!err)
610 apply_microcode(cpu);
611 else
612 printk(KERN_ERR "microcode: Could not apply microcode to CPU%d:"
613 " sig=0x%x, pf=0x%x, rev=0x%x\n",
614 cpu, uci->sig, uci->pf, uci->rev);
615
616 set_cpus_allowed_ptr(current, &old);
617 return err;
618}
619
620static void microcode_init_cpu(int cpu, int resume)
621{
622 cpumask_t old;
623 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
624
625 old = current->cpus_allowed;
626
627 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
628 mutex_lock(&microcode_mutex);
629 collect_cpu_info(cpu);
630 if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
631 cpu_request_microcode(cpu);
632 mutex_unlock(&microcode_mutex);
633 set_cpus_allowed_ptr(current, &old);
634}
635
636static void microcode_fini_cpu(int cpu)
637{
638 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
639
640 mutex_lock(&microcode_mutex);
641 uci->valid = 0;
642 vfree(uci->mc);
643 uci->mc = NULL;
644 mutex_unlock(&microcode_mutex);
645}
646
647static ssize_t reload_store(struct sys_device *dev,
648 struct sysdev_attribute *attr,
649 const char *buf, size_t sz)
650{
651 struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
652 char *end;
653 unsigned long val = simple_strtoul(buf, &end, 0);
654 int err = 0;
655 int cpu = dev->id;
656
657 if (end == buf)
658 return -EINVAL;
659 if (val == 1) {
660 cpumask_t old = current->cpus_allowed;
661
662 get_online_cpus();
663 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
664
665 mutex_lock(&microcode_mutex);
666 if (uci->valid)
667 err = cpu_request_microcode(cpu);
668 mutex_unlock(&microcode_mutex);
669 put_online_cpus();
670 set_cpus_allowed_ptr(current, &old);
671 }
672 if (err)
673 return err;
674 return sz;
675}
676
677static ssize_t version_show(struct sys_device *dev,
678 struct sysdev_attribute *attr, char *buf)
679{
680 struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
681
682 return sprintf(buf, "0x%x\n", uci->rev);
683}
684
685static ssize_t pf_show(struct sys_device *dev,
686 struct sysdev_attribute *attr, char *buf)
687{
688 struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
689
690 return sprintf(buf, "0x%x\n", uci->pf);
691}
692
693static SYSDEV_ATTR(reload, 0200, NULL, reload_store);
694static SYSDEV_ATTR(version, 0400, version_show, NULL);
695static SYSDEV_ATTR(processor_flags, 0400, pf_show, NULL);
696
697static struct attribute *mc_default_attrs[] = {
698 &attr_reload.attr,
699 &attr_version.attr,
700 &attr_processor_flags.attr,
701 NULL
702};
703
704static struct attribute_group mc_attr_group = {
705 .attrs = mc_default_attrs,
706 .name = "microcode",
707};
708
709static int __mc_sysdev_add(struct sys_device *sys_dev, int resume)
710{
711 int err, cpu = sys_dev->id;
712 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
713
714 if (!cpu_online(cpu))
715 return 0;
716
717 pr_debug("microcode: CPU%d added\n", cpu);
718 memset(uci, 0, sizeof(*uci));
719
720 err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
721 if (err)
722 return err;
723
724 microcode_init_cpu(cpu, resume);
725
726 return 0;
727}
728
729static int mc_sysdev_add(struct sys_device *sys_dev)
730{
731 return __mc_sysdev_add(sys_dev, 0);
732}
733
734static int mc_sysdev_remove(struct sys_device *sys_dev)
735{
736 int cpu = sys_dev->id;
737
738 if (!cpu_online(cpu))
739 return 0;
740
741 pr_debug("microcode: CPU%d removed\n", cpu);
742 microcode_fini_cpu(cpu);
743 sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
744 return 0;
745}
746
747static int mc_sysdev_resume(struct sys_device *dev)
748{
749 int cpu = dev->id;
750
751 if (!cpu_online(cpu))
752 return 0;
753 pr_debug("microcode: CPU%d resumed\n", cpu);
754 /* only CPU 0 will apply ucode here */
755 apply_microcode(0);
756 return 0;
757}
758
759static struct sysdev_driver mc_sysdev_driver = {
760 .add = mc_sysdev_add,
761 .remove = mc_sysdev_remove,
762 .resume = mc_sysdev_resume,
763};
764
765static __cpuinit int
766mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
767{
768 unsigned int cpu = (unsigned long)hcpu;
769 struct sys_device *sys_dev;
770
771 sys_dev = get_cpu_sysdev(cpu);
772 switch (action) {
773 case CPU_UP_CANCELED_FROZEN:
774 /* The CPU refused to come up during a system resume */
775 microcode_fini_cpu(cpu);
776 break;
777 case CPU_ONLINE:
778 case CPU_DOWN_FAILED:
779 mc_sysdev_add(sys_dev);
780 break;
781 case CPU_ONLINE_FROZEN:
782 /* System-wide resume is in progress, try to apply microcode */
783 if (apply_microcode_check_cpu(cpu)) {
784 /* The application of microcode failed */
785 microcode_fini_cpu(cpu);
786 __mc_sysdev_add(sys_dev, 1);
787 break;
788 }
789 case CPU_DOWN_FAILED_FROZEN:
790 if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group))
791 printk(KERN_ERR "microcode: Failed to create the sysfs "
792 "group for CPU%d\n", cpu);
793 break;
794 case CPU_DOWN_PREPARE:
795 mc_sysdev_remove(sys_dev);
796 break;
797 case CPU_DOWN_PREPARE_FROZEN:
798 /* Suspend is in progress, only remove the interface */
799 sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
800 break;
801 }
802 return NOTIFY_OK;
803}
804
805static struct notifier_block __refdata mc_cpu_notifier = {
806 .notifier_call = mc_cpu_callback,
807};
808
809static int __init microcode_init (void)
810{
811 int error;
812
813 printk(KERN_INFO
814 "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@aivazian.fsnet.co.uk>\n");
815
816 error = microcode_dev_init();
817 if (error)
818 return error;
819 microcode_pdev = platform_device_register_simple("microcode", -1,
820 NULL, 0);
821 if (IS_ERR(microcode_pdev)) {
822 microcode_dev_exit();
823 return PTR_ERR(microcode_pdev);
824 }
825
826 get_online_cpus();
827 error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver);
828 put_online_cpus();
829 if (error) {
830 microcode_dev_exit();
831 platform_device_unregister(microcode_pdev);
832 return error;
833 }
834
835 register_hotcpu_notifier(&mc_cpu_notifier);
836 return 0;
837}
838
839static void __exit microcode_exit (void)
840{
841 microcode_dev_exit();
842
843 unregister_hotcpu_notifier(&mc_cpu_notifier);
844
845 get_online_cpus();
846 sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
847 put_online_cpus();
848
849 platform_device_unregister(microcode_pdev);
850}
851
852module_init(microcode_init)
853module_exit(microcode_exit)
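Among the logic retired with this file is the sigmatch() predicate: signatures must be equal and the platform-flag masks must intersect, with pf == 0 on both sides accepted for CPUs that predate platform flags. Worked examples (values illustrative only):

	#define sigmatch(s1, s2, p1, p2) \
		(((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0))))

	/* sigmatch(0x6fb, 0x6fb, 0x20, 0x20) == 1: sigs equal, flags intersect */
	/* sigmatch(0x6fb, 0x6fb, 0x20, 0x01) == 0: sigs equal, flags disjoint  */
	/* sigmatch(0x543, 0x543, 0x00, 0x00) == 1: legacy CPUs with pf == 0    */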
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
new file mode 100644
index 000000000000..7a1f8eeac2c7
--- /dev/null
+++ b/arch/x86/kernel/microcode_amd.c
@@ -0,0 +1,435 @@
1/*
2 * AMD CPU Microcode Update Driver for Linux
3 * Copyright (C) 2008 Advanced Micro Devices Inc.
4 *
5 * Author: Peter Oruba <peter.oruba@amd.com>
6 *
7 * Based on work by:
8 * Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
9 *
10 * This driver allows updating microcode on AMD
11 * family 0x10 and 0x11 processors.
12 *
13 * Licensed under the terms of the GNU General Public
14 * License version 2. See file COPYING for details.
15*/
16
17#include <linux/capability.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/sched.h>
21#include <linux/cpumask.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/vmalloc.h>
25#include <linux/miscdevice.h>
26#include <linux/spinlock.h>
27#include <linux/mm.h>
28#include <linux/fs.h>
29#include <linux/mutex.h>
30#include <linux/cpu.h>
31#include <linux/firmware.h>
32#include <linux/platform_device.h>
33#include <linux/pci.h>
34#include <linux/pci_ids.h>
35
36#include <asm/msr.h>
37#include <asm/uaccess.h>
38#include <asm/processor.h>
39#include <asm/microcode.h>
40
41MODULE_DESCRIPTION("AMD Microcode Update Driver");
42MODULE_AUTHOR("Peter Oruba <peter.oruba@amd.com>");
43MODULE_LICENSE("GPL v2");
44
45#define UCODE_MAGIC 0x00414d44
46#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000
47#define UCODE_UCODE_TYPE 0x00000001
48
49struct equiv_cpu_entry {
50 unsigned int installed_cpu;
51 unsigned int fixed_errata_mask;
52 unsigned int fixed_errata_compare;
53 unsigned int equiv_cpu;
54};
55
56struct microcode_header_amd {
57 unsigned int data_code;
58 unsigned int patch_id;
59 unsigned char mc_patch_data_id[2];
60 unsigned char mc_patch_data_len;
61 unsigned char init_flag;
62 unsigned int mc_patch_data_checksum;
63 unsigned int nb_dev_id;
64 unsigned int sb_dev_id;
65 unsigned char processor_rev_id[2];
66 unsigned char nb_rev_id;
67 unsigned char sb_rev_id;
68 unsigned char bios_api_rev;
69 unsigned char reserved1[3];
70 unsigned int match_reg[8];
71};
72
73struct microcode_amd {
74 struct microcode_header_amd hdr;
75 unsigned int mpb[0];
76};
77
78#define UCODE_MAX_SIZE (2048)
79#define DEFAULT_UCODE_DATASIZE (896)
80#define MC_HEADER_SIZE (sizeof(struct microcode_header_amd))
81#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
82#define DWSIZE (sizeof(u32))
83/* For now we support a fixed ucode total size only */
84#define get_totalsize(mc) \
85 ((((struct microcode_amd *)mc)->hdr.mc_patch_data_len * 28) \
86 + MC_HEADER_SIZE)
87
88/* serialize access to the physical write */
89static DEFINE_SPINLOCK(microcode_update_lock);
90
91static struct equiv_cpu_entry *equiv_cpu_table;
92
93static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
94{
95 struct cpuinfo_x86 *c = &cpu_data(cpu);
96
97 memset(csig, 0, sizeof(*csig));
98
99 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
100 printk(KERN_ERR "microcode: CPU%d not a capable AMD processor\n",
101 cpu);
102 return -1;
103 }
104
105 asm volatile("movl %1, %%ecx; rdmsr"
106 : "=a" (csig->rev)
107 : "i" (0x0000008B) : "ecx");
108
109 printk(KERN_INFO "microcode: collect_cpu_info_amd : patch_id=0x%x\n",
110 csig->rev);
111
112 return 0;
113}
114
115static int get_matching_microcode(int cpu, void *mc, int rev)
116{
117 struct microcode_header_amd *mc_header = mc;
118 struct pci_dev *nb_pci_dev, *sb_pci_dev;
119 unsigned int current_cpu_id;
120 unsigned int equiv_cpu_id = 0x00;
121 unsigned int i = 0;
122
123 BUG_ON(equiv_cpu_table == NULL);
124 current_cpu_id = cpuid_eax(0x00000001);
125
126 while (equiv_cpu_table[i].installed_cpu != 0) {
127 if (current_cpu_id == equiv_cpu_table[i].installed_cpu) {
128 equiv_cpu_id = equiv_cpu_table[i].equiv_cpu;
129 break;
130 }
131 i++;
132 }
133
134 if (!equiv_cpu_id) {
135 printk(KERN_ERR "microcode: CPU%d cpu_id "
136 "not found in equivalent cpu table \n", cpu);
137 return 0;
138 }
139
140 if ((mc_header->processor_rev_id[0]) != (equiv_cpu_id & 0xff)) {
141 printk(KERN_ERR
142 "microcode: CPU%d patch does not match "
143 "(patch is %x, cpu extended is %x) \n",
144 cpu, mc_header->processor_rev_id[0],
145 (equiv_cpu_id & 0xff));
146 return 0;
147 }
148
149 if ((mc_header->processor_rev_id[1]) != ((equiv_cpu_id >> 16) & 0xff)) {
150 printk(KERN_ERR "microcode: CPU%d patch does not match "
151 "(patch is %x, cpu base id is %x) \n",
152 cpu, mc_header->processor_rev_id[1],
153 ((equiv_cpu_id >> 16) & 0xff));
154
155 return 0;
156 }
157
158 /* ucode may be northbridge specific */
159 if (mc_header->nb_dev_id) {
160 nb_pci_dev = pci_get_device(PCI_VENDOR_ID_AMD,
161 (mc_header->nb_dev_id & 0xff),
162 NULL);
163 if ((!nb_pci_dev) ||
164 (mc_header->nb_rev_id != nb_pci_dev->revision)) {
165 printk(KERN_ERR "microcode: CPU%d NB mismatch \n", cpu);
166 pci_dev_put(nb_pci_dev);
167 return 0;
168 }
169 pci_dev_put(nb_pci_dev);
170 }
171
172 /* ucode may be southbridge specific */
173 if (mc_header->sb_dev_id) {
174 sb_pci_dev = pci_get_device(PCI_VENDOR_ID_AMD,
175 (mc_header->sb_dev_id & 0xff),
176 NULL);
177 if ((!sb_pci_dev) ||
178 (mc_header->sb_rev_id != sb_pci_dev->revision)) {
179 printk(KERN_ERR "microcode: CPU%d SB mismatch \n", cpu);
180 pci_dev_put(sb_pci_dev);
181 return 0;
182 }
183 pci_dev_put(sb_pci_dev);
184 }
185
186 if (mc_header->patch_id <= rev)
187 return 0;
188
189 return 1;
190}
191
192static void apply_microcode_amd(int cpu)
193{
194 unsigned long flags;
195 unsigned int eax, edx;
196 unsigned int rev;
197 int cpu_num = raw_smp_processor_id();
198 struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
199 struct microcode_amd *mc_amd = uci->mc;
200 unsigned long addr;
201
202 /* We should bind the task to the CPU */
203 BUG_ON(cpu_num != cpu);
204
205 if (mc_amd == NULL)
206 return;
207
208 spin_lock_irqsave(&microcode_update_lock, flags);
209
210 addr = (unsigned long)&mc_amd->hdr.data_code;
211 edx = (unsigned int)(((unsigned long)upper_32_bits(addr)));
212 eax = (unsigned int)(((unsigned long)lower_32_bits(addr)));
213
214 asm volatile("movl %0, %%ecx; wrmsr" :
215 : "i" (0xc0010020), "a" (eax), "d" (edx) : "ecx");
216
217 /* get patch id after patching */
218 asm volatile("movl %1, %%ecx; rdmsr"
219 : "=a" (rev)
220 : "i" (0x0000008B) : "ecx");
221
222 spin_unlock_irqrestore(&microcode_update_lock, flags);
223
224 /* check current patch id and patch's id for match */
225 if (rev != mc_amd->hdr.patch_id) {
226 printk(KERN_ERR "microcode: CPU%d update from revision "
227 "0x%x to 0x%x failed\n", cpu_num,
228 mc_amd->hdr.patch_id, rev);
229 return;
230 }
231
232 printk(KERN_INFO "microcode: CPU%d updated from revision "
233 "0x%x to 0x%x \n",
234 cpu_num, uci->cpu_sig.rev, mc_amd->hdr.patch_id);
235
236 uci->cpu_sig.rev = rev;
237}
238
239static void * get_next_ucode(u8 *buf, unsigned int size,
240 int (*get_ucode_data)(void *, const void *, size_t),
241 unsigned int *mc_size)
242{
243 unsigned int total_size;
244#define UCODE_CONTAINER_SECTION_HDR 8
245 u8 section_hdr[UCODE_CONTAINER_SECTION_HDR];
246 void *mc;
247
248 if (get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR))
249 return NULL;
250
251 if (section_hdr[0] != UCODE_UCODE_TYPE) {
252 printk(KERN_ERR "microcode: error! "
253 "Wrong microcode payload type field\n");
254 return NULL;
255 }
256
257 total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8));
258
259 printk(KERN_INFO "microcode: size %u, total_size %u\n",
260 size, total_size);
261
262 if (total_size > size || total_size > UCODE_MAX_SIZE) {
263 printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
264 return NULL;
265 }
266
267 mc = vmalloc(UCODE_MAX_SIZE);
268 if (mc) {
269 memset(mc, 0, UCODE_MAX_SIZE);
270 if (get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, total_size)) {
271 vfree(mc);
272 mc = NULL;
273 } else
274 *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR;
275 }
276#undef UCODE_CONTAINER_SECTION_HDR
277 return mc;
278}
279
280
281static int install_equiv_cpu_table(u8 *buf,
282 int (*get_ucode_data)(void *, const void *, size_t))
283{
284#define UCODE_CONTAINER_HEADER_SIZE 12
285 u8 container_hdr[UCODE_CONTAINER_HEADER_SIZE];
286 unsigned int *buf_pos = (unsigned int *)container_hdr;
287 unsigned long size;
288
289 if (get_ucode_data(container_hdr, buf, UCODE_CONTAINER_HEADER_SIZE))
290 return 0;
291
292 size = buf_pos[2];
293
294 if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
295 printk(KERN_ERR "microcode: error! "
296 "Wrong microcode equivalnet cpu table\n");
297 return 0;
298 }
299
300 equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size);
301 if (!equiv_cpu_table) {
302 printk(KERN_ERR "microcode: error, can't allocate memory for equiv CPU table\n");
303 return 0;
304 }
305
306 buf += UCODE_CONTAINER_HEADER_SIZE;
307 if (get_ucode_data(equiv_cpu_table, buf, size)) {
308 vfree(equiv_cpu_table);
309 return 0;
310 }
311
312 return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */
313#undef UCODE_CONTAINER_HEADER_SIZE
314}
315
316static void free_equiv_cpu_table(void)
317{
318 if (equiv_cpu_table) {
319 vfree(equiv_cpu_table);
320 equiv_cpu_table = NULL;
321 }
322}
323
324static int generic_load_microcode(int cpu, void *data, size_t size,
325 int (*get_ucode_data)(void *, const void *, size_t))
326{
327 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
328 u8 *ucode_ptr = data, *new_mc = NULL, *mc;
329 int new_rev = uci->cpu_sig.rev;
330 unsigned int leftover;
331 unsigned long offset;
332
333 offset = install_equiv_cpu_table(ucode_ptr, get_ucode_data);
334 if (!offset) {
335 printk(KERN_ERR "microcode: installing equivalent cpu table failed\n");
336 return -EINVAL;
337 }
338
339 ucode_ptr += offset;
340 leftover = size - offset;
341
342 while (leftover) {
343 unsigned int uninitialized_var(mc_size);
344 struct microcode_header_amd *mc_header;
345
346 mc = get_next_ucode(ucode_ptr, leftover, get_ucode_data, &mc_size);
347 if (!mc)
348 break;
349
350 mc_header = (struct microcode_header_amd *)mc;
351 if (get_matching_microcode(cpu, mc, new_rev)) {
352 if (new_mc)
353 vfree(new_mc);
354 new_rev = mc_header->patch_id;
355 new_mc = mc;
356 } else
357 vfree(mc);
358
359 ucode_ptr += mc_size;
360 leftover -= mc_size;
361 }
362
363 if (new_mc) {
364 if (!leftover) {
365 if (uci->mc)
366 vfree(uci->mc);
367 uci->mc = new_mc;
368 pr_debug("microcode: CPU%d found a matching microcode update with"
369 " version 0x%x (current=0x%x)\n",
370 cpu, new_rev, uci->cpu_sig.rev);
371 } else
372 vfree(new_mc);
373 }
374
375 free_equiv_cpu_table();
376
377 return (int)leftover;
378}
379
380static int get_ucode_fw(void *to, const void *from, size_t n)
381{
382 memcpy(to, from, n);
383 return 0;
384}
385
386static int request_microcode_fw(int cpu, struct device *device)
387{
388 const char *fw_name = "amd-ucode/microcode_amd.bin";
389 const struct firmware *firmware;
390 int ret;
391
392 /* We should bind the task to the CPU */
393 BUG_ON(cpu != raw_smp_processor_id());
394
395 ret = request_firmware(&firmware, fw_name, device);
396 if (ret) {
397 printk(KERN_ERR "microcode: ucode data file %s load failed\n", fw_name);
398 return ret;
399 }
400
401 ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
402 &get_ucode_fw);
403
404 release_firmware(firmware);
405
406 return ret;
407}
408
409static int request_microcode_user(int cpu, const void __user *buf, size_t size)
410{
411 printk(KERN_WARNING "microcode: AMD microcode update via /dev/cpu/microcode "
412 "is not supported\n");
413 return -1;
414}
415
416static void microcode_fini_cpu_amd(int cpu)
417{
418 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
419
420 vfree(uci->mc);
421 uci->mc = NULL;
422}
423
424static struct microcode_ops microcode_amd_ops = {
425 .request_microcode_user = request_microcode_user,
426 .request_microcode_fw = request_microcode_fw,
427 .collect_cpu_info = collect_cpu_info_amd,
428 .apply_microcode = apply_microcode_amd,
429 .microcode_fini_cpu = microcode_fini_cpu_amd,
430};
431
432struct microcode_ops * __init init_amd_microcode(void)
433{
434 return &microcode_amd_ops;
435}
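microcode_amd.c exposes its entry points only through the microcode_ops table above; the vendor-neutral microcode_core.c that follows is the intended caller. A hedged sketch of how a core driver might dispatch through these ops (names taken from this diff; the actual core flow may differ):

	static struct microcode_ops *microcode_ops; /* from init_amd_microcode() */

	static int microcode_update_cpu(int cpu, struct device *dev)
	{
		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
		int err;

		err = microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
		if (err)
			return err;

		err = microcode_ops->request_microcode_fw(cpu, dev);
		if (!err)
			microcode_ops->apply_microcode(cpu);

		return err;
	}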
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
new file mode 100644
index 000000000000..936d8d55f230
--- /dev/null
+++ b/arch/x86/kernel/microcode_core.c
@@ -0,0 +1,508 @@
1/*
2 * Intel CPU Microcode Update Driver for Linux
3 *
4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 *
7 * This driver allows upgrading microcode on Intel processors
8 * belonging to the IA-32 family - PentiumPro, Pentium II,
9 * Pentium III, Xeon, Pentium 4, etc.
10 *
11 * Reference: Section 8.11 of Volume 3a, IA-32 Intel® Architecture
12 * Software Developer's Manual
13 * Order Number 253668 or free download from:
14 *
15 * http://developer.intel.com/design/pentium4/manuals/253668.htm
16 *
17 * For more information, go to http://www.urbanmyth.org/microcode
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 * 1.0 16 Feb 2000, Tigran Aivazian <tigran@sco.com>
25 * Initial release.
26 * 1.01 18 Feb 2000, Tigran Aivazian <tigran@sco.com>
27 * Added read() support + cleanups.
28 * 1.02 21 Feb 2000, Tigran Aivazian <tigran@sco.com>
29 * Added 'device trimming' support. open(O_WRONLY) zeroes
30 * and frees the saved copy of applied microcode.
31 * 1.03 29 Feb 2000, Tigran Aivazian <tigran@sco.com>
32 * Made to use devfs (/dev/cpu/microcode) + cleanups.
33 * 1.04 06 Jun 2000, Simon Trimmer <simon@veritas.com>
34 * Added misc device support (now uses both devfs and misc).
35 * Added MICROCODE_IOCFREE ioctl to clear memory.
36 * 1.05 09 Jun 2000, Simon Trimmer <simon@veritas.com>
37 *		Messages for error cases (non-Intel & no suitable microcode).
38 * 1.06 03 Aug 2000, Tigran Aivazian <tigran@veritas.com>
39 * Removed ->release(). Removed exclusive open and status bitmap.
40 * Added microcode_rwsem to serialize read()/write()/ioctl().
41 * Removed global kernel lock usage.
42 * 1.07 07 Sep 2000, Tigran Aivazian <tigran@veritas.com>
43 * Write 0 to 0x8B msr and then cpuid before reading revision,
44 *		so that it works even if no update was done by the
45 * BIOS. Otherwise, reading from 0x8B gives junk (which happened
46 * to be 0 on my machine which is why it worked even when I
47 * disabled update by the BIOS)
48 * Thanks to Eric W. Biederman <ebiederman@lnxi.com> for the fix.
49 * 1.08 11 Dec 2000, Richard Schaal <richard.schaal@intel.com> and
50 * Tigran Aivazian <tigran@veritas.com>
51 * Intel Pentium 4 processor support and bugfixes.
52 * 1.09 30 Oct 2001, Tigran Aivazian <tigran@veritas.com>
53 * Bugfix for HT (Hyper-Threading) enabled processors
54 * whereby processor resources are shared by all logical processors
55 * in a single CPU package.
56 * 1.10 28 Feb 2002 Asit K Mallick <asit.k.mallick@intel.com> and
57 * Tigran Aivazian <tigran@veritas.com>,
58 * Serialize updates as required on HT processors due to
59 * speculative nature of implementation.
60 * 1.11 22 Mar 2002 Tigran Aivazian <tigran@veritas.com>
61 * Fix the panic when writing zero-length microcode chunk.
62 * 1.12 29 Sep 2003 Nitin Kamble <nitin.a.kamble@intel.com>,
63 * Jun Nakajima <jun.nakajima@intel.com>
64 * Support for the microcode updates in the new format.
65 * 1.13 10 Oct 2003 Tigran Aivazian <tigran@veritas.com>
66 * Removed ->read() method and obsoleted MICROCODE_IOCFREE ioctl
67 * because we no longer hold a copy of applied microcode
68 * in kernel memory.
69 * 1.14 25 Jun 2004 Tigran Aivazian <tigran@veritas.com>
70 * Fix sigmatch() macro to handle old CPUs with pf == 0.
71 * Thanks to Stuart Swales for pointing out this bug.
72 */
73#include <linux/capability.h>
74#include <linux/kernel.h>
75#include <linux/init.h>
76#include <linux/sched.h>
77#include <linux/smp_lock.h>
78#include <linux/cpumask.h>
79#include <linux/module.h>
80#include <linux/slab.h>
81#include <linux/vmalloc.h>
82#include <linux/miscdevice.h>
83#include <linux/spinlock.h>
84#include <linux/mm.h>
85#include <linux/fs.h>
86#include <linux/mutex.h>
87#include <linux/cpu.h>
88#include <linux/firmware.h>
89#include <linux/platform_device.h>
90
91#include <asm/msr.h>
92#include <asm/uaccess.h>
93#include <asm/processor.h>
94#include <asm/microcode.h>
95
96MODULE_DESCRIPTION("Microcode Update Driver");
97MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
98MODULE_LICENSE("GPL");
99
100#define MICROCODE_VERSION "2.00"
101
102struct microcode_ops *microcode_ops;
103
104/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
105static DEFINE_MUTEX(microcode_mutex);
106
107struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
108EXPORT_SYMBOL_GPL(ucode_cpu_info);
109
110#ifdef CONFIG_MICROCODE_OLD_INTERFACE
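/*
 * Legacy /dev/cpu/microcode write path: apply_microcode() must run on
 * the CPU being updated, so the task is migrated to each online CPU in
 * turn before the per-vendor request/apply hooks are called.
 */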
111static int do_microcode_update(const void __user *buf, size_t size)
112{
113 cpumask_t old;
114 int error = 0;
115 int cpu;
116
117 old = current->cpus_allowed;
118
119 for_each_online_cpu(cpu) {
120 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
121
122 if (!uci->valid)
123 continue;
124
125 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
126 error = microcode_ops->request_microcode_user(cpu, buf, size);
127 if (error < 0)
128 goto out;
129 if (!error)
130 microcode_ops->apply_microcode(cpu);
131 }
132out:
133 set_cpus_allowed_ptr(current, &old);
134 return error;
135}
136
137static int microcode_open(struct inode *unused1, struct file *unused2)
138{
139 cycle_kernel_lock();
140 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
141}
142
143static ssize_t microcode_write(struct file *file, const char __user *buf,
144 size_t len, loff_t *ppos)
145{
146 ssize_t ret;
147
148 if ((len >> PAGE_SHIFT) > num_physpages) {
149 printk(KERN_ERR "microcode: too much data (max %ld pages)\n",
150 num_physpages);
151 return -EINVAL;
152 }
153
154 get_online_cpus();
155 mutex_lock(&microcode_mutex);
156
157 ret = do_microcode_update(buf, len);
158 if (!ret)
159 ret = (ssize_t)len;
160
161 mutex_unlock(&microcode_mutex);
162 put_online_cpus();
163
164 return ret;
165}
166
167static const struct file_operations microcode_fops = {
168 .owner = THIS_MODULE,
169 .write = microcode_write,
170 .open = microcode_open,
171};
172
173static struct miscdevice microcode_dev = {
174 .minor = MICROCODE_MINOR,
175 .name = "microcode",
176 .fops = &microcode_fops,
177};
178
179static int __init microcode_dev_init(void)
180{
181 int error;
182
183 error = misc_register(&microcode_dev);
184 if (error) {
185 printk(KERN_ERR
186 "microcode: can't misc_register on minor=%d\n",
187 MICROCODE_MINOR);
188 return error;
189 }
190
191 return 0;
192}
193
194static void microcode_dev_exit(void)
195{
196 misc_deregister(&microcode_dev);
197}
198
199MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
200#else
201#define microcode_dev_init() 0
202#define microcode_dev_exit() do { } while (0)
203#endif
204
205/* fake device for request_firmware */
206struct platform_device *microcode_pdev;
207
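/*
 * sysfs "reload" attribute: writing the value 1 re-requests the firmware
 * image for this CPU and applies it, e.g. (path as created by
 * mc_attr_group below):
 *
 *	echo 1 > /sys/devices/system/cpu/cpu0/microcode/reload
 */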
208static ssize_t reload_store(struct sys_device *dev,
209 struct sysdev_attribute *attr,
210 const char *buf, size_t sz)
211{
212 struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
213 char *end;
214 unsigned long val = simple_strtoul(buf, &end, 0);
215 int err = 0;
216 int cpu = dev->id;
217
218 if (end == buf)
219 return -EINVAL;
220 if (val == 1) {
221 cpumask_t old = current->cpus_allowed;
222
223 get_online_cpus();
224 if (cpu_online(cpu)) {
225 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
226 mutex_lock(&microcode_mutex);
227 if (uci->valid) {
228 err = microcode_ops->request_microcode_fw(cpu,
229 &microcode_pdev->dev);
230 if (!err)
231 microcode_ops->apply_microcode(cpu);
232 }
233 mutex_unlock(&microcode_mutex);
234 set_cpus_allowed_ptr(current, &old);
235 }
236 put_online_cpus();
237 }
238 if (err)
239 return err;
240 return sz;
241}
242
243static ssize_t version_show(struct sys_device *dev,
244 struct sysdev_attribute *attr, char *buf)
245{
246 struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
247
248 return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
249}
250
251static ssize_t pf_show(struct sys_device *dev,
252 struct sysdev_attribute *attr, char *buf)
253{
254 struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
255
256 return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
257}
258
259static SYSDEV_ATTR(reload, 0200, NULL, reload_store);
260static SYSDEV_ATTR(version, 0400, version_show, NULL);
261static SYSDEV_ATTR(processor_flags, 0400, pf_show, NULL);
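/* the three files above appear under /sys/devices/system/cpu/cpuN/microcode/ */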
262
263static struct attribute *mc_default_attrs[] = {
264 &attr_reload.attr,
265 &attr_version.attr,
266 &attr_processor_flags.attr,
267 NULL
268};
269
270static struct attribute_group mc_attr_group = {
271 .attrs = mc_default_attrs,
272 .name = "microcode",
273};
274
275static void microcode_fini_cpu(int cpu)
276{
277 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
278
279 mutex_lock(&microcode_mutex);
280 microcode_ops->microcode_fini_cpu(cpu);
281 uci->valid = 0;
282 mutex_unlock(&microcode_mutex);
283}
284
285static void collect_cpu_info(int cpu)
286{
287 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
288
289 memset(uci, 0, sizeof(*uci));
290 if (!microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig))
291 uci->valid = 1;
292}
293
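/*
 * Returns 0 when the cached microcode still matches this CPU's signature
 * (safe to reapply), 1 when there is nothing usable in the cache, and
 * -1 when the CPU signature cannot be collected at all.
 */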
294static int microcode_resume_cpu(int cpu)
295{
296 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
297 struct cpu_signature nsig;
298
299 pr_debug("microcode: CPU%d resumed\n", cpu);
300
301 if (!uci->mc)
302 return 1;
303
304 /*
305 * Let's verify that the 'cached' ucode does belong
306 * to this cpu (a bit of paranoia):
307 */
308 if (microcode_ops->collect_cpu_info(cpu, &nsig)) {
309 microcode_fini_cpu(cpu);
310 return -1;
311 }
312
313 if (memcmp(&nsig, &uci->cpu_sig, sizeof(nsig))) {
314 microcode_fini_cpu(cpu);
315 /* Should we look for a new ucode here? */
316 return 1;
317 }
318
319 return 0;
320}
321
322void microcode_update_cpu(int cpu)
323{
324 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
325 int err = 0;
326
327 /*
328	 * Check if the system resume is in progress (uci->valid is set),
329 * otherwise just request a firmware:
330 */
331 if (uci->valid) {
332 err = microcode_resume_cpu(cpu);
333 } else {
334 collect_cpu_info(cpu);
335 if (uci->valid && system_state == SYSTEM_RUNNING)
336 err = microcode_ops->request_microcode_fw(cpu,
337 &microcode_pdev->dev);
338 }
339 if (!err)
340 microcode_ops->apply_microcode(cpu);
341}
342
343static void microcode_init_cpu(int cpu)
344{
345 cpumask_t old = current->cpus_allowed;
346
347 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
348 /* We should bind the task to the CPU */
349 BUG_ON(raw_smp_processor_id() != cpu);
350
351 mutex_lock(&microcode_mutex);
352 microcode_update_cpu(cpu);
353 mutex_unlock(&microcode_mutex);
354
355 set_cpus_allowed_ptr(current, &old);
356}
357
358static int mc_sysdev_add(struct sys_device *sys_dev)
359{
360 int err, cpu = sys_dev->id;
361 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
362
363 if (!cpu_online(cpu))
364 return 0;
365
366 pr_debug("microcode: CPU%d added\n", cpu);
367 memset(uci, 0, sizeof(*uci));
368
369 err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
370 if (err)
371 return err;
372
373 microcode_init_cpu(cpu);
374 return 0;
375}
376
377static int mc_sysdev_remove(struct sys_device *sys_dev)
378{
379 int cpu = sys_dev->id;
380
381 if (!cpu_online(cpu))
382 return 0;
383
384 pr_debug("microcode: CPU%d removed\n", cpu);
385 microcode_fini_cpu(cpu);
386 sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
387 return 0;
388}
389
390static int mc_sysdev_resume(struct sys_device *dev)
391{
392 int cpu = dev->id;
393
394 if (!cpu_online(cpu))
395 return 0;
396
397	/* only CPU 0 applies ucode here; the other CPUs are re-added via
	   CPU hotplug on resume and are updated from the notifier */
398 microcode_update_cpu(0);
399 return 0;
400}
401
402static struct sysdev_driver mc_sysdev_driver = {
403 .add = mc_sysdev_add,
404 .remove = mc_sysdev_remove,
405 .resume = mc_sysdev_resume,
406};
407
408static __cpuinit int
409mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
410{
411 unsigned int cpu = (unsigned long)hcpu;
412 struct sys_device *sys_dev;
413
414 sys_dev = get_cpu_sysdev(cpu);
415 switch (action) {
416 case CPU_ONLINE:
417 case CPU_ONLINE_FROZEN:
418 microcode_init_cpu(cpu);
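		/* fall through - a freshly onlined CPU also needs the sysfs group */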
419 case CPU_DOWN_FAILED:
420 case CPU_DOWN_FAILED_FROZEN:
421 pr_debug("microcode: CPU%d added\n", cpu);
422 if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group))
423 printk(KERN_ERR "microcode: Failed to create the sysfs "
424 "group for CPU%d\n", cpu);
425 break;
426 case CPU_DOWN_PREPARE:
427 case CPU_DOWN_PREPARE_FROZEN:
428 /* Suspend is in progress, only remove the interface */
429 sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
430 pr_debug("microcode: CPU%d removed\n", cpu);
431 break;
432 case CPU_DEAD:
433 case CPU_UP_CANCELED_FROZEN:
434 /* The CPU refused to come up during a system resume */
435 microcode_fini_cpu(cpu);
436 break;
437 }
438 return NOTIFY_OK;
439}
440
441static struct notifier_block __refdata mc_cpu_notifier = {
442 .notifier_call = mc_cpu_callback,
443};
444
445static int __init microcode_init(void)
446{
447 struct cpuinfo_x86 *c = &cpu_data(0);
448 int error;
449
450 if (c->x86_vendor == X86_VENDOR_INTEL)
451 microcode_ops = init_intel_microcode();
452 else if (c->x86_vendor == X86_VENDOR_AMD)
453 microcode_ops = init_amd_microcode();
454
455 if (!microcode_ops) {
456 printk(KERN_ERR "microcode: no support for this CPU vendor\n");
457 return -ENODEV;
458 }
459
460 error = microcode_dev_init();
461 if (error)
462 return error;
463 microcode_pdev = platform_device_register_simple("microcode", -1,
464 NULL, 0);
465 if (IS_ERR(microcode_pdev)) {
466 microcode_dev_exit();
467 return PTR_ERR(microcode_pdev);
468 }
469
470 get_online_cpus();
471 error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver);
472 put_online_cpus();
473 if (error) {
474 microcode_dev_exit();
475 platform_device_unregister(microcode_pdev);
476 return error;
477 }
478
479 register_hotcpu_notifier(&mc_cpu_notifier);
480
481 printk(KERN_INFO
482 "Microcode Update Driver: v" MICROCODE_VERSION
483 " <tigran@aivazian.fsnet.co.uk>"
484 " <peter.oruba@amd.com>\n");
485
486 return 0;
487}
488
489static void __exit microcode_exit(void)
490{
491 microcode_dev_exit();
492
493 unregister_hotcpu_notifier(&mc_cpu_notifier);
494
495 get_online_cpus();
496 sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
497 put_online_cpus();
498
499 platform_device_unregister(microcode_pdev);
500
501 microcode_ops = NULL;
502
503 printk(KERN_INFO
504 "Microcode Update Driver: v" MICROCODE_VERSION " removed.\n");
505}
506
507module_init(microcode_init);
508module_exit(microcode_exit);
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
new file mode 100644
index 000000000000..622dc4a21784
--- /dev/null
+++ b/arch/x86/kernel/microcode_intel.c
@@ -0,0 +1,480 @@
1/*
2 * Intel CPU Microcode Update Driver for Linux
3 *
4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 *
7 * This driver allows upgrading microcode on Intel processors
8 * belonging to the IA-32 family - PentiumPro, Pentium II,
9 * Pentium III, Xeon, Pentium 4, etc.
10 *
11 * Reference: Section 8.11 of Volume 3a, IA-32 Intel® Architecture
12 * Software Developer's Manual
13 * Order Number 253668 or free download from:
14 *
15 * http://developer.intel.com/design/pentium4/manuals/253668.htm
16 *
17 * For more information, go to http://www.urbanmyth.org/microcode
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 * 1.0 16 Feb 2000, Tigran Aivazian <tigran@sco.com>
25 * Initial release.
26 * 1.01 18 Feb 2000, Tigran Aivazian <tigran@sco.com>
27 * Added read() support + cleanups.
28 * 1.02 21 Feb 2000, Tigran Aivazian <tigran@sco.com>
29 * Added 'device trimming' support. open(O_WRONLY) zeroes
30 * and frees the saved copy of applied microcode.
31 * 1.03 29 Feb 2000, Tigran Aivazian <tigran@sco.com>
32 * Made to use devfs (/dev/cpu/microcode) + cleanups.
33 * 1.04 06 Jun 2000, Simon Trimmer <simon@veritas.com>
34 * Added misc device support (now uses both devfs and misc).
35 * Added MICROCODE_IOCFREE ioctl to clear memory.
36 * 1.05 09 Jun 2000, Simon Trimmer <simon@veritas.com>
37 *		Messages for error cases (non-Intel & no suitable microcode).
38 * 1.06 03 Aug 2000, Tigran Aivazian <tigran@veritas.com>
39 * Removed ->release(). Removed exclusive open and status bitmap.
40 * Added microcode_rwsem to serialize read()/write()/ioctl().
41 * Removed global kernel lock usage.
42 * 1.07 07 Sep 2000, Tigran Aivazian <tigran@veritas.com>
43 * Write 0 to 0x8B msr and then cpuid before reading revision,
44 *		so that it works even if no update was done by the
45 * BIOS. Otherwise, reading from 0x8B gives junk (which happened
46 * to be 0 on my machine which is why it worked even when I
47 * disabled update by the BIOS)
48 * Thanks to Eric W. Biederman <ebiederman@lnxi.com> for the fix.
49 * 1.08 11 Dec 2000, Richard Schaal <richard.schaal@intel.com> and
50 * Tigran Aivazian <tigran@veritas.com>
51 * Intel Pentium 4 processor support and bugfixes.
52 * 1.09 30 Oct 2001, Tigran Aivazian <tigran@veritas.com>
53 * Bugfix for HT (Hyper-Threading) enabled processors
54 * whereby processor resources are shared by all logical processors
55 * in a single CPU package.
56 * 1.10 28 Feb 2002 Asit K Mallick <asit.k.mallick@intel.com> and
57 * Tigran Aivazian <tigran@veritas.com>,
58 * Serialize updates as required on HT processors due to
59 * speculative nature of implementation.
60 * 1.11 22 Mar 2002 Tigran Aivazian <tigran@veritas.com>
61 * Fix the panic when writing zero-length microcode chunk.
62 * 1.12 29 Sep 2003 Nitin Kamble <nitin.a.kamble@intel.com>,
63 * Jun Nakajima <jun.nakajima@intel.com>
64 * Support for the microcode updates in the new format.
65 * 1.13 10 Oct 2003 Tigran Aivazian <tigran@veritas.com>
66 * Removed ->read() method and obsoleted MICROCODE_IOCFREE ioctl
67 * because we no longer hold a copy of applied microcode
68 * in kernel memory.
69 * 1.14 25 Jun 2004 Tigran Aivazian <tigran@veritas.com>
70 * Fix sigmatch() macro to handle old CPUs with pf == 0.
71 * Thanks to Stuart Swales for pointing out this bug.
72 */
73#include <linux/capability.h>
74#include <linux/kernel.h>
75#include <linux/init.h>
76#include <linux/sched.h>
77#include <linux/smp_lock.h>
78#include <linux/cpumask.h>
79#include <linux/module.h>
80#include <linux/slab.h>
81#include <linux/vmalloc.h>
82#include <linux/miscdevice.h>
83#include <linux/spinlock.h>
84#include <linux/mm.h>
85#include <linux/fs.h>
86#include <linux/mutex.h>
87#include <linux/cpu.h>
88#include <linux/firmware.h>
89#include <linux/platform_device.h>
90
91#include <asm/msr.h>
92#include <asm/uaccess.h>
93#include <asm/processor.h>
94#include <asm/microcode.h>
95
96MODULE_DESCRIPTION("Microcode Update Driver");
97MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
98MODULE_LICENSE("GPL");
99
100struct microcode_header_intel {
101 unsigned int hdrver;
102 unsigned int rev;
103 unsigned int date;
104 unsigned int sig;
105 unsigned int cksum;
106 unsigned int ldrver;
107 unsigned int pf;
108 unsigned int datasize;
109 unsigned int totalsize;
110 unsigned int reserved[3];
111};
112
113struct microcode_intel {
114 struct microcode_header_intel hdr;
115 unsigned int bits[0];
116};
117
118/* microcode format is extended from prescott processors */
119struct extended_signature {
120 unsigned int sig;
121 unsigned int pf;
122 unsigned int cksum;
123};
124
125struct extended_sigtable {
126 unsigned int count;
127 unsigned int cksum;
128 unsigned int reserved[3];
129 struct extended_signature sigs[0];
130};
131
132#define DEFAULT_UCODE_DATASIZE (2000)
133#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel))
134#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
135#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable))
136#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature))
137#define DWSIZE (sizeof(u32))
138#define get_totalsize(mc) \
139 (((struct microcode_intel *)mc)->hdr.totalsize ? \
140 ((struct microcode_intel *)mc)->hdr.totalsize : \
141 DEFAULT_UCODE_TOTALSIZE)
142
143#define get_datasize(mc) \
144 (((struct microcode_intel *)mc)->hdr.datasize ? \
145 ((struct microcode_intel *)mc)->hdr.datasize : DEFAULT_UCODE_DATASIZE)
146
147#define sigmatch(s1, s2, p1, p2) \
148 (((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0))))
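/*
 * sigmatch(): the CPU signatures must be identical and the platform-flag
 * masks must share at least one bit (e.g. pf 0x04 matches 0x0c but not
 * 0x10); the pf == 0 clause keeps old CPUs without platform flags matching.
 */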
149
150#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
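/*
 * On-disk layout of one update image: the 48-byte header above, then
 * `datasize' bytes of microcode data, then an optional extended
 * signature table for images shared by several processor models.
 */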
151
152/* serialize access to the physical write to MSR 0x79 */
153static DEFINE_SPINLOCK(microcode_update_lock);
154
155static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
156{
157 struct cpuinfo_x86 *c = &cpu_data(cpu_num);
158 unsigned int val[2];
159
160 memset(csig, 0, sizeof(*csig));
161
162 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
163 cpu_has(c, X86_FEATURE_IA64)) {
164 printk(KERN_ERR "microcode: CPU%d not a capable Intel "
165 "processor\n", cpu_num);
166 return -1;
167 }
168
169 csig->sig = cpuid_eax(0x00000001);
170
171 if ((c->x86_model >= 5) || (c->x86 > 6)) {
172 /* get processor flags from MSR 0x17 */
173 rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
174 csig->pf = 1 << ((val[1] >> 18) & 7);
175 }
176
177 wrmsr(MSR_IA32_UCODE_REV, 0, 0);
178 /* see notes above for revision 1.07. Apparent chip bug */
179 sync_core();
180 /* get the current revision from MSR 0x8B */
181 rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);
182 pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
183 csig->sig, csig->pf, csig->rev);
184
185 return 0;
186}
187
188static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
189{
190 return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1;
191}
192
193static inline int
194update_match_revision(struct microcode_header_intel *mc_header, int rev)
195{
196 return (mc_header->rev <= rev) ? 0 : 1;
197}
198
199static int microcode_sanity_check(void *mc)
200{
201 struct microcode_header_intel *mc_header = mc;
202 struct extended_sigtable *ext_header = NULL;
203 struct extended_signature *ext_sig;
204 unsigned long total_size, data_size, ext_table_size;
205 int sum, orig_sum, ext_sigcount = 0, i;
206
207 total_size = get_totalsize(mc_header);
208 data_size = get_datasize(mc_header);
209 if (data_size + MC_HEADER_SIZE > total_size) {
210 printk(KERN_ERR "microcode: error! "
211 "Bad data size in microcode data file\n");
212 return -EINVAL;
213 }
214
215 if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
216 printk(KERN_ERR "microcode: error! "
217 "Unknown microcode update format\n");
218 return -EINVAL;
219 }
220 ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
221 if (ext_table_size) {
222 if ((ext_table_size < EXT_HEADER_SIZE)
223 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
224 printk(KERN_ERR "microcode: error! "
225 "Small exttable size in microcode data file\n");
226 return -EINVAL;
227 }
228 ext_header = mc + MC_HEADER_SIZE + data_size;
229 if (ext_table_size != exttable_size(ext_header)) {
230 printk(KERN_ERR "microcode: error! "
231 "Bad exttable size in microcode data file\n");
232 return -EFAULT;
233 }
234 ext_sigcount = ext_header->count;
235 }
236
237 /* check extended table checksum */
238 if (ext_table_size) {
239 int ext_table_sum = 0;
240 int *ext_tablep = (int *)ext_header;
241
242 i = ext_table_size / DWSIZE;
243 while (i--)
244 ext_table_sum += ext_tablep[i];
245 if (ext_table_sum) {
246 printk(KERN_WARNING "microcode: aborting, "
247 "bad extended signature table checksum\n");
248 return -EINVAL;
249 }
250 }
251
252 /* calculate the checksum */
253 orig_sum = 0;
254 i = (MC_HEADER_SIZE + data_size) / DWSIZE;
255 while (i--)
256 orig_sum += ((int *)mc)[i];
257 if (orig_sum) {
258 printk(KERN_ERR "microcode: aborting, bad checksum\n");
259 return -EINVAL;
260 }
261 if (!ext_table_size)
262 return 0;
263 /* check extended signature checksum */
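	/*
	 * Each extended entry reuses the main image checksum with its own
	 * sig/pf/cksum triplet substituted for the header's, so the
	 * subtract-and-add below must again sum to zero.
	 */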
264 for (i = 0; i < ext_sigcount; i++) {
265 ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
266 EXT_SIGNATURE_SIZE * i;
267 sum = orig_sum
268 - (mc_header->sig + mc_header->pf + mc_header->cksum)
269 + (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
270 if (sum) {
271 printk(KERN_ERR "microcode: aborting, bad checksum\n");
272 return -EINVAL;
273 }
274 }
275 return 0;
276}
277
278/*
279 * return 0 - no update found
280 * return 1 - found update
281 */
282static int
283get_matching_microcode(struct cpu_signature *cpu_sig, void *mc, int rev)
284{
285 struct microcode_header_intel *mc_header = mc;
286 struct extended_sigtable *ext_header;
287 unsigned long total_size = get_totalsize(mc_header);
288 int ext_sigcount, i;
289 struct extended_signature *ext_sig;
290
291 if (!update_match_revision(mc_header, rev))
292 return 0;
293
294 if (update_match_cpu(cpu_sig, mc_header->sig, mc_header->pf))
295 return 1;
296
297 /* Look for ext. headers: */
298 if (total_size <= get_datasize(mc_header) + MC_HEADER_SIZE)
299 return 0;
300
301 ext_header = mc + get_datasize(mc_header) + MC_HEADER_SIZE;
302 ext_sigcount = ext_header->count;
303 ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
304
305 for (i = 0; i < ext_sigcount; i++) {
306 if (update_match_cpu(cpu_sig, ext_sig->sig, ext_sig->pf))
307 return 1;
308 ext_sig++;
309 }
310 return 0;
311}
312
313static void apply_microcode(int cpu)
314{
315 unsigned long flags;
316 unsigned int val[2];
317 int cpu_num = raw_smp_processor_id();
318 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
319 struct microcode_intel *mc_intel = uci->mc;
320
321 /* We should bind the task to the CPU */
322 BUG_ON(cpu_num != cpu);
323
324 if (mc_intel == NULL)
325 return;
326
327 /* serialize access to the physical write to MSR 0x79 */
328 spin_lock_irqsave(&microcode_update_lock, flags);
329
330 /* write microcode via MSR 0x79 */
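	/* the double ">> 16" is a 32-bit-safe ">> 32": shifting a 32-bit
	   long by 32 would be undefined, while this yields 0 there and
	   the high half of the buffer address on 64-bit kernels */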
331 wrmsr(MSR_IA32_UCODE_WRITE,
332 (unsigned long) mc_intel->bits,
333 (unsigned long) mc_intel->bits >> 16 >> 16);
334 wrmsr(MSR_IA32_UCODE_REV, 0, 0);
335
336 /* see notes above for revision 1.07. Apparent chip bug */
337 sync_core();
338
339 /* get the current revision from MSR 0x8B */
340 rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
341
342 spin_unlock_irqrestore(&microcode_update_lock, flags);
343 if (val[1] != mc_intel->hdr.rev) {
344 printk(KERN_ERR "microcode: CPU%d update from revision "
345 "0x%x to 0x%x failed\n", cpu_num, uci->cpu_sig.rev, val[1]);
346 return;
347 }
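	/* hdr.date packs the release date as MMDDYYYY: month in bits
	   31-24, day in bits 23-16, year in the low 16 bits */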
348 printk(KERN_INFO "microcode: CPU%d updated from revision "
349		"0x%x to 0x%x, date = %04x-%02x-%02x\n",
350 cpu_num, uci->cpu_sig.rev, val[1],
351 mc_intel->hdr.date & 0xffff,
352 mc_intel->hdr.date >> 24,
353 (mc_intel->hdr.date >> 16) & 0xff);
354 uci->cpu_sig.rev = val[1];
355}
356
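/*
 * The firmware image may concatenate several updates; parse them one by
 * one, sanity-check each, and keep only the newest one matching this
 * CPU. Returns the number of unparsed bytes (0 on complete success).
 */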
357static int generic_load_microcode(int cpu, void *data, size_t size,
358 int (*get_ucode_data)(void *, const void *, size_t))
359{
360 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
361 u8 *ucode_ptr = data, *new_mc = NULL, *mc;
362 int new_rev = uci->cpu_sig.rev;
363 unsigned int leftover = size;
364
365 while (leftover) {
366 struct microcode_header_intel mc_header;
367 unsigned int mc_size;
368
369 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
370 break;
371
372 mc_size = get_totalsize(&mc_header);
373 if (!mc_size || mc_size > leftover) {
374			printk(KERN_ERR "microcode: error! "
375 "Bad data in microcode data file\n");
376 break;
377 }
378
379 mc = vmalloc(mc_size);
380 if (!mc)
381 break;
382
383 if (get_ucode_data(mc, ucode_ptr, mc_size) ||
384 microcode_sanity_check(mc) < 0) {
385 vfree(mc);
386 break;
387 }
388
389 if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) {
390 if (new_mc)
391 vfree(new_mc);
392 new_rev = mc_header.rev;
393 new_mc = mc;
394 } else
395 vfree(mc);
396
397 ucode_ptr += mc_size;
398 leftover -= mc_size;
399 }
400
401 if (new_mc) {
402 if (!leftover) {
403 if (uci->mc)
404 vfree(uci->mc);
405 uci->mc = (struct microcode_intel *)new_mc;
406 pr_debug("microcode: CPU%d found a matching microcode update with"
407 " version 0x%x (current=0x%x)\n",
408 cpu, new_rev, uci->cpu_sig.rev);
409 } else
410 vfree(new_mc);
411 }
412
413 return (int)leftover;
414}
415
416static int get_ucode_fw(void *to, const void *from, size_t n)
417{
418 memcpy(to, from, n);
419 return 0;
420}
421
422static int request_microcode_fw(int cpu, struct device *device)
423{
424 char name[30];
425 struct cpuinfo_x86 *c = &cpu_data(cpu);
426 const struct firmware *firmware;
427 int ret;
428
429 /* We should bind the task to the CPU */
430 BUG_ON(cpu != raw_smp_processor_id());
431 sprintf(name, "intel-ucode/%02x-%02x-%02x",
432 c->x86, c->x86_model, c->x86_mask);
433 ret = request_firmware(&firmware, name, device);
434 if (ret) {
435 pr_debug("microcode: data file %s load failed\n", name);
436 return ret;
437 }
438
439 ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
440 &get_ucode_fw);
441
442 release_firmware(firmware);
443
444 return ret;
445}
446
447static int get_ucode_user(void *to, const void *from, size_t n)
448{
449 return copy_from_user(to, from, n);
450}
451
452static int request_microcode_user(int cpu, const void __user *buf, size_t size)
453{
454 /* We should bind the task to the CPU */
455 BUG_ON(cpu != raw_smp_processor_id());
456
457 return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user);
458}
459
460static void microcode_fini_cpu(int cpu)
461{
462 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
463
464 vfree(uci->mc);
465 uci->mc = NULL;
466}
467
468struct microcode_ops microcode_intel_ops = {
469 .request_microcode_user = request_microcode_user,
470 .request_microcode_fw = request_microcode_fw,
471 .collect_cpu_info = collect_cpu_info,
472 .apply_microcode = apply_microcode,
473 .microcode_fini_cpu = microcode_fini_cpu,
474};
475
476struct microcode_ops * __init init_intel_microcode(void)
477{
478 return &microcode_intel_ops;
479}
480
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index b3fb430725cb..f98f4e1dba09 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -397,7 +397,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
 		generic_bigsmp_probe();
 #endif
 
+#ifdef CONFIG_X86_32
 	setup_apic_routing();
+#endif
 	if (!num_processors)
 		printk(KERN_ERR "MPTABLE: no processors registered!\n");
 	return num_processors;
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 2e2af5d18191..82a7c7ed6d45 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -163,8 +163,8 @@ static int __cpuinit msr_device_create(int cpu)
 {
 	struct device *dev;
 
-	dev = device_create_drvdata(msr_class, NULL, MKDEV(MSR_MAJOR, cpu),
-				    NULL, "msr%d", cpu);
+	dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL,
+			    "msr%d", cpu);
 	return IS_ERR(dev) ? PTR_ERR(dev) : 0;
 }
 
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index abb78a2cc4ad..2c97f07f1c2c 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -299,6 +299,15 @@ void acpi_nmi_disable(void)
 	on_each_cpu(__acpi_nmi_disable, NULL, 1);
 }
 
+/*
+ * This function is called as soon as the LAPIC NMI watchdog driver has
+ * everything in place and is ready to check if the NMIs belong to the
+ * NMI watchdog.
+ */
+void cpu_nmi_set_wd_enabled(void)
+{
+	__get_cpu_var(wd_enabled) = 1;
+}
+
 void setup_apic_nmi_watchdog(void *unused)
 {
 	if (__get_cpu_var(wd_enabled))
@@ -311,8 +320,6 @@ void setup_apic_nmi_watchdog(void *unused)
 
 	switch (nmi_watchdog) {
 	case NMI_LOCAL_APIC:
-		/* enable it before to avoid race with handler */
-		__get_cpu_var(wd_enabled) = 1;
 		if (lapic_watchdog_init(nmi_hz) < 0) {
 			__get_cpu_var(wd_enabled) = 0;
 			return;
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
index eecc8c18f010..4caff39078e0 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/numaq_32.c
@@ -229,6 +229,12 @@ static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
 	}
 }
 
+static int __init numaq_setup_ioapic_ids(void)
+{
+	/* so can skip it */
+	return 1;
+}
+
 static struct x86_quirks numaq_x86_quirks __initdata = {
 	.arch_pre_time_init = numaq_pre_time_init,
 	.arch_time_init = NULL,
@@ -243,6 +249,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = {
 	.mpc_oem_bus_info = mpc_oem_bus_info,
 	.mpc_oem_pci_bus = mpc_oem_pci_bus,
 	.smp_read_mpc_oem = smp_read_mpc_oem,
+	.setup_ioapic_ids = numaq_setup_ioapic_ids,
 };
 
 void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 3e6672274807..7a13fac63a1f 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -190,12 +190,12 @@ EXPORT_SYMBOL_GPL(olpc_ec_cmd);
 static void __init platform_detect(void)
 {
 	size_t propsize;
-	u32 rev;
+	__be32 rev;
 
 	if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4,
 			&propsize) || propsize != 4) {
 		printk(KERN_ERR "ofw: getprop call failed!\n");
-		rev = 0;
+		rev = cpu_to_be32(0);
 	}
 	olpc_platform_info.boardrev = be32_to_cpu(rev);
 }
@@ -203,7 +203,7 @@ static void __init platform_detect(void)
 static void __init platform_detect(void)
 {
 	/* stopgap until OFW support is added to the kernel */
-	olpc_platform_info.boardrev = be32_to_cpu(0xc2);
+	olpc_platform_info.boardrev = 0xc2;
 }
 #endif
 
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
new file mode 100644
index 000000000000..0e9f1982b1dd
--- /dev/null
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -0,0 +1,37 @@
1/*
2 * Split spinlock implementation out into its own file, so it can be
3 * compiled in a FTRACE-compatible way.
4 */
5#include <linux/spinlock.h>
6#include <linux/module.h>
7
8#include <asm/paravirt.h>
9
10static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
11{
12 __raw_spin_lock(lock);
13}
14
15struct pv_lock_ops pv_lock_ops = {
16#ifdef CONFIG_SMP
17 .spin_is_locked = __ticket_spin_is_locked,
18 .spin_is_contended = __ticket_spin_is_contended,
19
20 .spin_lock = __ticket_spin_lock,
21 .spin_lock_flags = default_spin_lock_flags,
22 .spin_trylock = __ticket_spin_trylock,
23 .spin_unlock = __ticket_spin_unlock,
24#endif
25};
26EXPORT_SYMBOL(pv_lock_ops);
27
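/*
 * Byte locks are simpler for a hypervisor to virtualize than ticket
 * locks; a paravirt guest (e.g. Xen at the time of this patch) can call
 * this early to switch the lock ops before any spinlock is taken.
 */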
28void __init paravirt_use_bytelocks(void)
29{
30#ifdef CONFIG_SMP
31 pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
32 pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
33 pv_lock_ops.spin_lock = __byte_spin_lock;
34 pv_lock_ops.spin_trylock = __byte_spin_trylock;
35 pv_lock_ops.spin_unlock = __byte_spin_unlock;
36#endif
37}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 300da17e61cb..e4c8fb608873 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -268,17 +268,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return __get_cpu_var(paravirt_lazy_mode);
 }
 
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-	pv_lock_ops.spin_lock = __byte_spin_lock;
-	pv_lock_ops.spin_trylock = __byte_spin_trylock;
-	pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -330,6 +319,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #endif
 	.wbinvd = native_wbinvd,
 	.read_msr = native_read_msr_safe,
+	.read_msr_amd = native_read_msr_amd_safe,
 	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
@@ -348,6 +338,10 @@ struct pv_cpu_ops pv_cpu_ops = {
 	.write_ldt_entry = native_write_ldt_entry,
 	.write_gdt_entry = native_write_gdt_entry,
 	.write_idt_entry = native_write_idt_entry,
+
+	.alloc_ldt = paravirt_nop,
+	.free_ldt = paravirt_nop,
+
 	.load_sp0 = native_load_sp0,
 
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
@@ -373,8 +367,6 @@ struct pv_cpu_ops pv_cpu_ops = {
 
 struct pv_apic_ops pv_apic_ops = {
 #ifdef CONFIG_X86_LOCAL_APIC
-	.apic_write = native_apic_write,
-	.apic_read = native_apic_read,
 	.setup_boot_clock = setup_boot_APIC_clock,
 	.setup_secondary_clock = setup_secondary_APIC_clock,
 	.startup_ipi_hook = paravirt_nop,
@@ -461,18 +453,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.set_fixmap = native_set_fixmap,
 };
 
-struct pv_lock_ops pv_lock_ops = {
-#ifdef CONFIG_SMP
-	.spin_is_locked = __ticket_spin_is_locked,
-	.spin_is_contended = __ticket_spin_is_contended,
-
-	.spin_lock = __ticket_spin_lock,
-	.spin_trylock = __ticket_spin_trylock,
-	.spin_unlock = __ticket_spin_unlock,
-#endif
-};
-EXPORT_SYMBOL(pv_lock_ops);
-
 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL (pv_cpu_ops);
 EXPORT_SYMBOL (pv_mmu_ops);
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 58262218781b..9fe644f4861d 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -23,7 +23,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		start = start_##ops##_##x;	\
 		end = end_##ops##_##x;		\
 		goto patch_site
-	switch(type) {
+	switch (type) {
 		PATCH_SITE(pv_irq_ops, irq_disable);
 		PATCH_SITE(pv_irq_ops, irq_enable);
 		PATCH_SITE(pv_irq_ops, restore_fl);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index dcdac6c826e9..e1e731d78f38 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -217,16 +217,6 @@ static inline unsigned long verify_bit_range(unsigned long* bitmap,
 
 #endif /* CONFIG_IOMMU_DEBUG */
 
-static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
-{
-	unsigned int npages;
-
-	npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK);
-	npages >>= PAGE_SHIFT;
-
-	return npages;
-}
-
 static inline int translation_enabled(struct iommu_table *tbl)
 {
 	/* only PHBs with translation enabled have an IOMMU table */
@@ -261,7 +251,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
 			badbit, tbl, start_addr, npages);
 	}
 
-	set_bit_string(tbl->it_map, index, npages);
+	iommu_area_reserve(tbl->it_map, index, npages);
 
 	spin_unlock_irqrestore(&tbl->it_lock, flags);
 }
@@ -408,7 +398,7 @@ static void calgary_unmap_sg(struct device *dev,
 		if (dmalen == 0)
 			break;
 
-		npages = num_dma_pages(dma, dmalen);
+		npages = iommu_num_pages(dma, dmalen, PAGE_SIZE);
 		iommu_free(tbl, dma, npages);
 	}
 }
@@ -427,7 +417,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		BUG_ON(!sg_page(s));
 
 		vaddr = (unsigned long) sg_virt(s);
-		npages = num_dma_pages(vaddr, s->length);
+		npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
 
 		entry = iommu_range_alloc(dev, tbl, npages);
 		if (entry == bad_dma_address) {
@@ -464,7 +454,7 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
 	struct iommu_table *tbl = find_iommu_table(dev);
 
 	uaddr = (unsigned long)vaddr;
-	npages = num_dma_pages(uaddr, size);
+	npages = iommu_num_pages(uaddr, size, PAGE_SIZE);
 
 	return iommu_alloc(dev, tbl, vaddr, npages, direction);
 }
@@ -475,7 +465,7 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
 	struct iommu_table *tbl = find_iommu_table(dev);
 	unsigned int npages;
 
-	npages = num_dma_pages(dma_handle, size);
+	npages = iommu_num_pages(dma_handle, size, PAGE_SIZE);
 	iommu_free(tbl, dma_handle, npages);
 }
 
@@ -491,6 +481,8 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
 	npages = size >> PAGE_SHIFT;
 	order = get_order(size);
 
+	flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
 	/* alloc enough pages (and possibly more) */
 	ret = (void *)__get_free_pages(flag, order);
 	if (!ret)
@@ -510,8 +502,22 @@ error:
 	return ret;
 }
 
+static void calgary_free_coherent(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle)
+{
+	unsigned int npages;
+	struct iommu_table *tbl = find_iommu_table(dev);
+
+	size = PAGE_ALIGN(size);
+	npages = size >> PAGE_SHIFT;
+
+	iommu_free(tbl, dma_handle, npages);
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+
 static struct dma_mapping_ops calgary_dma_ops = {
 	.alloc_coherent = calgary_alloc_coherent,
+	.free_coherent = calgary_free_coherent,
 	.map_single = calgary_map_single,
 	.unmap_single = calgary_unmap_single,
 	.map_sg = calgary_map_sg,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 87d4d6964ec2..192624820217 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -41,11 +41,12 @@ EXPORT_SYMBOL(bad_dma_address);
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to older i386. */
-struct device fallback_dev = {
+struct device x86_dma_fallback_dev = {
 	.bus_id = "fallback device",
 	.coherent_dma_mask = DMA_32BIT_MASK,
-	.dma_mask = &fallback_dev.coherent_dma_mask,
+	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
 };
+EXPORT_SYMBOL(x86_dma_fallback_dev);
 
 int dma_set_mask(struct device *dev, u64 mask)
 {
@@ -82,7 +83,7 @@ void __init dma32_reserve_bootmem(void)
 	 * using 512M as goal
 	 */
 	align = 64ULL<<20;
-	size = round_up(dma32_bootmem_size, align);
+	size = roundup(dma32_bootmem_size, align);
 	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
 				 512ULL<<20);
 	if (dma32_bootmem_ptr)
@@ -124,15 +125,46 @@ void __init pci_iommu_alloc(void)
 		pci_swiotlb_init();
 }
 
-unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
+unsigned long iommu_nr_pages(unsigned long addr, unsigned long len)
 {
 	unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
 
 	return size >> PAGE_SHIFT;
 }
-EXPORT_SYMBOL(iommu_num_pages);
+EXPORT_SYMBOL(iommu_nr_pages);
 #endif
 
+void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+				 dma_addr_t *dma_addr, gfp_t flag)
+{
+	unsigned long dma_mask;
+	struct page *page;
+	dma_addr_t addr;
+
+	dma_mask = dma_alloc_coherent_mask(dev, flag);
+
+	flag |= __GFP_ZERO;
+again:
+	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+	if (!page)
+		return NULL;
+
+	addr = page_to_phys(page);
+	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
+		__free_pages(page, get_order(size));
+
+		if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
+			flag = (flag & ~GFP_DMA32) | GFP_DMA;
+			goto again;
+		}
+
+		return NULL;
+	}
+
+	*dma_addr = addr;
+	return page_address(page);
+}
+
 /*
  * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
  * documentation.
@@ -241,147 +273,6 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-/* Allocate DMA memory on node near device */
-static noinline struct page *
-dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
-{
-	int node;
-
-	node = dev_to_node(dev);
-
-	return alloc_pages_node(node, gfp, order);
-}
-
-/*
- * Allocate memory for a coherent mapping.
- */
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t gfp)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
-	void *memory = NULL;
-	struct page *page;
-	unsigned long dma_mask = 0;
-	dma_addr_t bus;
-	int noretry = 0;
-
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
-		return memory;
-
-	if (!dev) {
-		dev = &fallback_dev;
-		gfp |= GFP_DMA;
-	}
-	dma_mask = dev->coherent_dma_mask;
-	if (dma_mask == 0)
-		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
-
-	/* Device not DMA able */
-	if (dev->dma_mask == NULL)
-		return NULL;
-
-	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
-	if (gfp & __GFP_DMA)
-		noretry = 1;
-
-#ifdef CONFIG_X86_64
-	/* Why <=? Even when the mask is smaller than 4GB it is often
-	   larger than 16MB and in this case we have a chance of
-	   finding fitting memory in the next higher zone first. If
-	   not retry with true GFP_DMA. -AK */
-	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
-		gfp |= GFP_DMA32;
-		if (dma_mask < DMA_32BIT_MASK)
-			noretry = 1;
-	}
-#endif
-
- again:
-	page = dma_alloc_pages(dev,
-		noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
-	if (page == NULL)
-		return NULL;
-
-	{
-		int high, mmu;
-		bus = page_to_phys(page);
-		memory = page_address(page);
-		high = (bus + size) >= dma_mask;
-		mmu = high;
-		if (force_iommu && !(gfp & GFP_DMA))
-			mmu = 1;
-		else if (high) {
-			free_pages((unsigned long)memory,
-				   get_order(size));
-
-			/* Don't use the 16MB ZONE_DMA unless absolutely
-			   needed. It's better to use remapping first. */
-			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
-				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-				goto again;
-			}
-
-			/* Let low level make its own zone decisions */
-			gfp &= ~(GFP_DMA32|GFP_DMA);
-
-			if (ops->alloc_coherent)
-				return ops->alloc_coherent(dev, size,
-							   dma_handle, gfp);
-			return NULL;
-		}
-
-		memset(memory, 0, size);
-		if (!mmu) {
-			*dma_handle = bus;
-			return memory;
-		}
-	}
-
-	if (ops->alloc_coherent) {
-		free_pages((unsigned long)memory, get_order(size));
-		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return ops->alloc_coherent(dev, size, dma_handle, gfp);
-	}
-
-	if (ops->map_simple) {
-		*dma_handle = ops->map_simple(dev, virt_to_phys(memory),
-					      size,
-					      PCI_DMA_BIDIRECTIONAL);
-		if (*dma_handle != bad_dma_address)
-			return memory;
-	}
-
-	if (panic_on_overflow)
-		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
-		      (unsigned long)size);
-	free_pages((unsigned long)memory, get_order(size));
-	return NULL;
-}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-/*
- * Unmap coherent memory.
- * The caller must ensure that the device has finished accessing the mapping.
- */
-void dma_free_coherent(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t bus)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
-
-	int order = get_order(size);
-	WARN_ON(irqs_disabled());	/* for portability */
-	if (dma_release_from_coherent(dev, order, vaddr))
-		return;
-	if (ops->unmap_single)
-		ops->unmap_single(dev, bus, size, 0);
-	free_pages((unsigned long)vaddr, order);
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
 static int __init pci_iommu_init(void)
 {
 	calgary_iommu_init();
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index be33a5442d82..e3f75bbcedea 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -27,8 +27,8 @@
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
 #include <linux/sysdev.h>
+#include <linux/io.h>
 #include <asm/atomic.h>
-#include <asm/io.h>
 #include <asm/mtrr.h>
 #include <asm/pgtable.h>
 #include <asm/proto.h>
@@ -80,9 +80,10 @@ AGPEXTERN int agp_memory_reserved;
 AGPEXTERN __u32 *agp_gatt_table;
 
 static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
-static int need_flush;		/* global flush state. set for each gart wrap */
+static bool need_flush;		/* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(struct device *dev, int size)
+static unsigned long alloc_iommu(struct device *dev, int size,
+				 unsigned long align_mask)
 {
 	unsigned long offset, flags;
 	unsigned long boundary_size;
@@ -90,26 +91,27 @@ static unsigned long alloc_iommu(struct device *dev, int size)
 
 	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
 			   PAGE_SIZE) >> PAGE_SHIFT;
-	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
 			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
-				  size, base_index, boundary_size, 0);
+				  size, base_index, boundary_size, align_mask);
 	if (offset == -1) {
-		need_flush = 1;
+		need_flush = true;
 		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
-					  size, base_index, boundary_size, 0);
+					  size, base_index, boundary_size,
+					  align_mask);
 	}
 	if (offset != -1) {
 		next_bit = offset+size;
 		if (next_bit >= iommu_pages) {
 			next_bit = 0;
-			need_flush = 1;
+			need_flush = true;
 		}
 	}
 	if (iommu_fullflush)
-		need_flush = 1;
+		need_flush = true;
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 
 	return offset;
@@ -134,7 +136,7 @@ static void flush_gart(void)
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	if (need_flush) {
 		k8_flush_garts();
-		need_flush = 0;
+		need_flush = false;
 	}
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
@@ -173,7 +175,8 @@ static void dump_leak(void)
 		iommu_leak_pages);
 	for (i = 0; i < iommu_leak_pages; i += 2) {
 		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
-		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
+		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
+			       0);
 		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
 	}
 	printk(KERN_DEBUG "\n");
@@ -212,34 +215,24 @@ static void iommu_full(struct device *dev, size_t size, int dir)
 static inline int
 need_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-	u64 mask = *dev->dma_mask;
-	int high = addr + size > mask;
-	int mmu = high;
-
-	if (force_iommu)
-		mmu = 1;
-
-	return mmu;
+	return force_iommu ||
+		!is_buffer_dma_capable(*dev->dma_mask, addr, size);
 }
 
 static inline int
 nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-	u64 mask = *dev->dma_mask;
-	int high = addr + size > mask;
-	int mmu = high;
-
-	return mmu;
+	return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
 }
 
 /* Map a single continuous physical area into the IOMMU.
  * Caller needs to check if the iommu is needed and flush.
  */
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
-				size_t size, int dir)
+				size_t size, int dir, unsigned long align_mask)
 {
-	unsigned long npages = iommu_num_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(dev, npages);
+	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
+	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
 	int i;
 
 	if (iommu_page == -1) {
@@ -259,16 +252,6 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
259 return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK); 252 return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
260} 253}
261 254
262static dma_addr_t
263gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
264{
265 dma_addr_t map = dma_map_area(dev, paddr, size, dir);
266
267 flush_gart();
268
269 return map;
270}
271
272/* Map a single area into the IOMMU */ 255/* Map a single area into the IOMMU */
273static dma_addr_t 256static dma_addr_t
274gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir) 257gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
@@ -276,12 +259,13 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
276 unsigned long bus; 259 unsigned long bus;
277 260
278 if (!dev) 261 if (!dev)
279 dev = &fallback_dev; 262 dev = &x86_dma_fallback_dev;
280 263
281 if (!need_iommu(dev, paddr, size)) 264 if (!need_iommu(dev, paddr, size))
282 return paddr; 265 return paddr;
283 266
284 bus = gart_map_simple(dev, paddr, size, dir); 267 bus = dma_map_area(dev, paddr, size, dir, 0);
268 flush_gart();
285 269
286 return bus; 270 return bus;
287} 271}
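
The iommu_num_pages() calls in this file gain an explicit page-size argument because the helper moved to generic code. Its presumed shape (lib/iommu-helper.c): count the io pages covering [addr, addr + len), including the leading intra-page offset.

        static unsigned long iommu_num_pages(unsigned long addr,
                                             unsigned long len,
                                             unsigned long io_page_size)
        {
                unsigned long size = (addr & (io_page_size - 1)) + len;

                return DIV_ROUND_UP(size, io_page_size);
        }
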
@@ -301,7 +285,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
301 return; 285 return;
302 286
303 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; 287 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
304 npages = iommu_num_pages(dma_addr, size); 288 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
305 for (i = 0; i < npages; i++) { 289 for (i = 0; i < npages; i++) {
306 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry; 290 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
307 CLEAR_LEAK(iommu_page + i); 291 CLEAR_LEAK(iommu_page + i);
@@ -340,7 +324,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
340 unsigned long addr = sg_phys(s); 324 unsigned long addr = sg_phys(s);
341 325
342 if (nonforced_iommu(dev, addr, s->length)) { 326 if (nonforced_iommu(dev, addr, s->length)) {
343 addr = dma_map_area(dev, addr, s->length, dir); 327 addr = dma_map_area(dev, addr, s->length, dir, 0);
344 if (addr == bad_dma_address) { 328 if (addr == bad_dma_address) {
345 if (i > 0) 329 if (i > 0)
346 gart_unmap_sg(dev, sg, i, dir); 330 gart_unmap_sg(dev, sg, i, dir);
@@ -362,7 +346,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
362 int nelems, struct scatterlist *sout, 346 int nelems, struct scatterlist *sout,
363 unsigned long pages) 347 unsigned long pages)
364{ 348{
365 unsigned long iommu_start = alloc_iommu(dev, pages); 349 unsigned long iommu_start = alloc_iommu(dev, pages, 0);
366 unsigned long iommu_page = iommu_start; 350 unsigned long iommu_page = iommu_start;
367 struct scatterlist *s; 351 struct scatterlist *s;
368 int i; 352 int i;
@@ -384,7 +368,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
384 } 368 }
385 369
386 addr = phys_addr; 370 addr = phys_addr;
387 pages = iommu_num_pages(s->offset, s->length); 371 pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
388 while (pages--) { 372 while (pages--) {
389 iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); 373 iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
390 SET_LEAK(iommu_page); 374 SET_LEAK(iommu_page);
@@ -427,7 +411,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
427 return 0; 411 return 0;
428 412
429 if (!dev) 413 if (!dev)
430 dev = &fallback_dev; 414 dev = &x86_dma_fallback_dev;
431 415
432 out = 0; 416 out = 0;
433 start = 0; 417 start = 0;
@@ -467,7 +451,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
467 451
468 seg_size += s->length; 452 seg_size += s->length;
469 need = nextneed; 453 need = nextneed;
470 pages += iommu_num_pages(s->offset, s->length); 454 pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
471 ps = s; 455 ps = s;
472 } 456 }
473 if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0) 457 if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
@@ -499,6 +483,46 @@ error:
499 return 0; 483 return 0;
500} 484}
501 485
486/* allocate and map a coherent mapping */
487static void *
488gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
489 gfp_t flag)
490{
491 dma_addr_t paddr;
492 unsigned long align_mask;
493 struct page *page;
494
495 if (force_iommu && !(flag & GFP_DMA)) {
496 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
497 page = alloc_pages(flag | __GFP_ZERO, get_order(size));
498 if (!page)
499 return NULL;
500
501 align_mask = (1UL << get_order(size)) - 1;
502 paddr = dma_map_area(dev, page_to_phys(page), size,
503 DMA_BIDIRECTIONAL, align_mask);
504
505 flush_gart();
506 if (paddr != bad_dma_address) {
507 *dma_addr = paddr;
508 return page_address(page);
509 }
510 __free_pages(page, get_order(size));
511 } else
512 return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
513
514 return NULL;
515}
516
517/* free a coherent mapping */
518static void
519gart_free_coherent(struct device *dev, size_t size, void *vaddr,
520 dma_addr_t dma_addr)
521{
522 gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
523 free_pages((unsigned long)vaddr, get_order(size));
524}
525
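
gart_alloc_coherent() maps through the GART only when force_iommu requires it and GFP_DMA cannot be used; otherwise it defers to the shared fallback. A simplified sketch of what dma_generic_alloc_coherent() is assumed to do in this series (the retry-with-GFP_DMA loop of the real code is elided):

        /* Sketch, not the actual implementation: */
        void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *dma_addr, gfp_t flag)
        {
                struct page *page;
                dma_addr_t addr;

                page = alloc_pages(flag | __GFP_ZERO, get_order(size));
                if (!page)
                        return NULL;

                addr = page_to_phys(page);
                if (!is_buffer_dma_capable(*dev->dma_mask, addr, size)) {
                        __free_pages(page, get_order(size));
                        return NULL;    /* real code retries with GFP_DMA */
                }

                *dma_addr = addr;
                return page_address(page);
        }
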
502static int no_agp; 526static int no_agp;
503 527
504static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) 528static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -649,13 +673,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
649 info->aper_size = aper_size >> 20; 673 info->aper_size = aper_size >> 20;
650 674
651 gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32); 675 gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
652 gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size)); 676 gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
677 get_order(gatt_size));
653 if (!gatt) 678 if (!gatt)
654 panic("Cannot allocate GATT table"); 679 panic("Cannot allocate GATT table");
655 if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT)) 680 if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
656 panic("Could not set GART PTEs to uncacheable pages"); 681 panic("Could not set GART PTEs to uncacheable pages");
657 682
658 memset(gatt, 0, gatt_size);
659 agp_gatt_table = gatt; 683 agp_gatt_table = gatt;
660 684
661 enable_gart_translations(); 685 enable_gart_translations();
@@ -664,7 +688,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
664 if (!error) 688 if (!error)
665 error = sysdev_register(&device_gart); 689 error = sysdev_register(&device_gart);
666 if (error) 690 if (error)
667 panic("Could not register gart_sysdev -- would corrupt data on next suspend"); 691 panic("Could not register gart_sysdev -- "
692 "would corrupt data on next suspend");
668 693
669 flush_gart(); 694 flush_gart();
670 695
@@ -680,20 +705,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
680 return -1; 705 return -1;
681} 706}
682 707
683extern int agp_amd64_init(void);
684
685static struct dma_mapping_ops gart_dma_ops = { 708static struct dma_mapping_ops gart_dma_ops = {
686 .map_single = gart_map_single, 709 .map_single = gart_map_single,
687 .map_simple = gart_map_simple,
688 .unmap_single = gart_unmap_single, 710 .unmap_single = gart_unmap_single,
689 .sync_single_for_cpu = NULL,
690 .sync_single_for_device = NULL,
691 .sync_single_range_for_cpu = NULL,
692 .sync_single_range_for_device = NULL,
693 .sync_sg_for_cpu = NULL,
694 .sync_sg_for_device = NULL,
695 .map_sg = gart_map_sg, 711 .map_sg = gart_map_sg,
696 .unmap_sg = gart_unmap_sg, 712 .unmap_sg = gart_unmap_sg,
713 .alloc_coherent = gart_alloc_coherent,
714 .free_coherent = gart_free_coherent,
697}; 715};
698 716
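The deleted .sync_* lines were redundant: C guarantees that members omitted from a designated initializer are zero, so the NULL sync hooks never needed to be spelled out.

        static struct dma_mapping_ops example_ops = {
                .map_single = gart_map_single,
                /* .sync_single_for_cpu and friends are implicitly NULL */
        };
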
699void gart_iommu_shutdown(void) 717void gart_iommu_shutdown(void)
@@ -753,8 +771,8 @@ void __init gart_iommu_init(void)
753 (no_agp && init_k8_gatt(&info) < 0)) { 771 (no_agp && init_k8_gatt(&info) < 0)) {
754 if (max_pfn > MAX_DMA32_PFN) { 772 if (max_pfn > MAX_DMA32_PFN) {
755 printk(KERN_WARNING "More than 4GB of memory " 773 printk(KERN_WARNING "More than 4GB of memory "
756 "but GART IOMMU not available.\n" 774 "but GART IOMMU not available.\n");
757 KERN_WARNING "falling back to iommu=soft.\n"); 775 printk(KERN_WARNING "falling back to iommu=soft.\n");
758 } 776 }
759 return; 777 return;
760 } 778 }
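
Log-level markers such as KERN_WARNING are plain string prefixes ("<4>"), and printk in this series only interprets them at the start of a message, so the old concatenated form printed the second marker literally. Each line now gets its own call:

        /* Wrong: the second "<4>" is printed as text after the newline. */
        printk(KERN_WARNING "line one\n" KERN_WARNING "line two\n");
        /* Right: one call per line, each with its own level. */
        printk(KERN_WARNING "line one\n");
        printk(KERN_WARNING "line two\n");
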
@@ -772,19 +790,16 @@ void __init gart_iommu_init(void)
772 iommu_size = check_iommu_size(info.aper_base, aper_size); 790 iommu_size = check_iommu_size(info.aper_base, aper_size);
773 iommu_pages = iommu_size >> PAGE_SHIFT; 791 iommu_pages = iommu_size >> PAGE_SHIFT;
774 792
775 iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL, 793 iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
776 get_order(iommu_pages/8)); 794 get_order(iommu_pages/8));
777 if (!iommu_gart_bitmap) 795 if (!iommu_gart_bitmap)
778 panic("Cannot allocate iommu bitmap\n"); 796 panic("Cannot allocate iommu bitmap\n");
779 memset(iommu_gart_bitmap, 0, iommu_pages/8);
780 797
781#ifdef CONFIG_IOMMU_LEAK 798#ifdef CONFIG_IOMMU_LEAK
782 if (leak_trace) { 799 if (leak_trace) {
783 iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL, 800 iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
784 get_order(iommu_pages*sizeof(void *))); 801 get_order(iommu_pages*sizeof(void *)));
785 if (iommu_leak_tab) 802 if (!iommu_leak_tab)
786 memset(iommu_leak_tab, 0, iommu_pages * 8);
787 else
788 printk(KERN_DEBUG 803 printk(KERN_DEBUG
789 "PCI-DMA: Cannot allocate leak trace area\n"); 804 "PCI-DMA: Cannot allocate leak trace area\n");
790 } 805 }
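
Three allocations in this file switch from allocate-then-memset to __GFP_ZERO. Besides being shorter, this removes the leak-table memset that cleared iommu_pages * 8 bytes instead of iommu_pages * sizeof(void *) (harmless on x86-64, but fragile), and it lets the error test read naturally. The pattern, with illustrative names:

        tab = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!tab)
                return -ENOMEM; /* pages arrive already zeroed on success */
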
@@ -794,7 +809,7 @@ void __init gart_iommu_init(void)
794 * Out of IOMMU space handling. 809 * Out of IOMMU space handling.
795 * Reserve some invalid pages at the beginning of the GART. 810 * Reserve some invalid pages at the beginning of the GART.
796 */ 811 */
797 set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES); 812 iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
798 813
799 agp_memory_reserved = iommu_size; 814 agp_memory_reserved = iommu_size;
800 printk(KERN_INFO 815 printk(KERN_INFO
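
set_bit_string() is replaced here by the generic iommu-helper primitive. Presumed shape (lib/iommu-helper.c): mark len bits in the bitmap as used starting at i, in this case fencing off the EMERGENCY_PAGES at the bottom of the GART.

        void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
        {
                unsigned long end = i + len;

                while (i < end) {
                        __set_bit(i, map);
                        i++;
                }
        }
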
@@ -852,7 +867,8 @@ void __init gart_parse_options(char *p)
852 if (!strncmp(p, "leak", 4)) { 867 if (!strncmp(p, "leak", 4)) {
853 leak_trace = 1; 868 leak_trace = 1;
854 p += 4; 869 p += 4;
855 if (*p == '=') ++p; 870 if (*p == '=')
871 ++p;
856 if (isdigit(*p) && get_option(&p, &arg)) 872 if (isdigit(*p) && get_option(&p, &arg))
857 iommu_leak_pages = arg; 873 iommu_leak_pages = arg;
858 } 874 }
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 3f91f71cdc3e..c70ab5a5d4c8 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -14,7 +14,7 @@
14static int 14static int
15check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) 15check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
16{ 16{
17 if (hwdev && bus + size > *hwdev->dma_mask) { 17 if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) {
18 if (*hwdev->dma_mask >= DMA_32BIT_MASK) 18 if (*hwdev->dma_mask >= DMA_32BIT_MASK)
19 printk(KERN_ERR 19 printk(KERN_ERR
20 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", 20 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
@@ -72,7 +72,15 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
72 return nents; 72 return nents;
73} 73}
74 74
75static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
76 dma_addr_t dma_addr)
77{
78 free_pages((unsigned long)vaddr, get_order(size));
79}
80
75struct dma_mapping_ops nommu_dma_ops = { 81struct dma_mapping_ops nommu_dma_ops = {
82 .alloc_coherent = dma_generic_alloc_coherent,
83 .free_coherent = nommu_free_coherent,
76 .map_single = nommu_map_single, 84 .map_single = nommu_map_single,
77 .map_sg = nommu_map_sg, 85 .map_sg = nommu_map_sg,
78 .is_phys = 1, 86 .is_phys = 1,
diff --git a/arch/x86/kernel/pcspeaker.c b/arch/x86/kernel/pcspeaker.c
index bc1f2d3ea277..a311ffcaad16 100644
--- a/arch/x86/kernel/pcspeaker.c
+++ b/arch/x86/kernel/pcspeaker.c
@@ -1,20 +1,13 @@
1#include <linux/platform_device.h> 1#include <linux/platform_device.h>
2#include <linux/errno.h> 2#include <linux/err.h>
3#include <linux/init.h> 3#include <linux/init.h>
4 4
5static __init int add_pcspkr(void) 5static __init int add_pcspkr(void)
6{ 6{
7 struct platform_device *pd; 7 struct platform_device *pd;
8 int ret;
9 8
10 pd = platform_device_alloc("pcspkr", -1); 9 pd = platform_device_register_simple("pcspkr", -1, NULL, 0);
11 if (!pd)
12 return -ENOMEM;
13 10
14 ret = platform_device_add(pd); 11 return IS_ERR(pd) ? PTR_ERR(pd) : 0;
15 if (ret)
16 platform_device_put(pd);
17
18 return ret;
19} 12}
20device_initcall(add_pcspkr); 13device_initcall(add_pcspkr);
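
platform_device_register_simple() bundles the alloc/add/put-on-error sequence the old code spelled out, and it returns an ERR_PTR() rather than NULL on failure, hence the IS_ERR/PTR_ERR idiom above. Its signature, for reference:

        struct platform_device *
        platform_device_register_simple(const char *name, int id,
                                        const struct resource *res,
                                        unsigned int num);
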
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 876e91890777..c622772744d8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -15,7 +15,6 @@ unsigned long idle_nomwait;
15EXPORT_SYMBOL(idle_nomwait); 15EXPORT_SYMBOL(idle_nomwait);
16 16
17struct kmem_cache *task_xstate_cachep; 17struct kmem_cache *task_xstate_cachep;
18static int force_mwait __cpuinitdata;
19 18
20int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 19int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
21{ 20{
@@ -185,7 +184,8 @@ static void mwait_idle(void)
185static void poll_idle(void) 184static void poll_idle(void)
186{ 185{
187 local_irq_enable(); 186 local_irq_enable();
188 cpu_relax(); 187 while (!need_resched())
188 cpu_relax();
189} 189}
190 190
191/* 191/*
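
Idle callbacks are expected to return only once a reschedule is pending; the one-shot cpu_relax() made poll_idle() bounce straight back out into cpu_idle(). The rewritten loop spins in place, and on x86 cpu_relax() is assumed to expand to the PAUSE instruction, which eases pressure on an SMT sibling:

        /* Presumed x86 definition, for reference: */
        static inline void cpu_relax(void)
        {
                __asm__ __volatile__("rep; nop" ::: "memory");
        }
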
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 31f40b24bf5d..0a1302fe6d45 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -37,6 +37,7 @@
37#include <linux/tick.h> 37#include <linux/tick.h>
38#include <linux/percpu.h> 38#include <linux/percpu.h>
39#include <linux/prctl.h> 39#include <linux/prctl.h>
40#include <linux/dmi.h>
40 41
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
@@ -56,6 +57,8 @@
56#include <asm/cpu.h> 57#include <asm/cpu.h>
57#include <asm/kdebug.h> 58#include <asm/kdebug.h>
58#include <asm/idle.h> 59#include <asm/idle.h>
60#include <asm/syscalls.h>
61#include <asm/smp.h>
59 62
60asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 63asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
61 64
@@ -73,47 +76,12 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
73 return ((unsigned long *)tsk->thread.sp)[3]; 76 return ((unsigned long *)tsk->thread.sp)[3];
74} 77}
75 78
76#ifdef CONFIG_HOTPLUG_CPU 79#ifndef CONFIG_SMP
77#include <asm/nmi.h>
78
79static void cpu_exit_clear(void)
80{
81 int cpu = raw_smp_processor_id();
82
83 idle_task_exit();
84
85 cpu_uninit();
86 irq_ctx_exit(cpu);
87
88 cpu_clear(cpu, cpu_callout_map);
89 cpu_clear(cpu, cpu_callin_map);
90
91 numa_remove_cpu(cpu);
92 c1e_remove_cpu(cpu);
93}
94
95/* We don't actually take CPU down, just spin without interrupts. */
96static inline void play_dead(void)
97{
98 /* This must be done before dead CPU ack */
99 cpu_exit_clear();
100 mb();
101 /* Ack it */
102 __get_cpu_var(cpu_state) = CPU_DEAD;
103
104 /*
105 * With physical CPU hotplug, we should halt the cpu
106 */
107 local_irq_disable();
108 /* mask all interrupts, flush any and all caches, and halt */
109 wbinvd_halt();
110}
111#else
112static inline void play_dead(void) 80static inline void play_dead(void)
113{ 81{
114 BUG(); 82 BUG();
115} 83}
116#endif /* CONFIG_HOTPLUG_CPU */ 84#endif
117 85
118/* 86/*
119 * The idle thread. There's no useful work to be 87 * The idle thread. There's no useful work to be
@@ -155,12 +123,13 @@ void cpu_idle(void)
155 } 123 }
156} 124}
157 125
158void __show_registers(struct pt_regs *regs, int all) 126void __show_regs(struct pt_regs *regs, int all)
159{ 127{
160 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; 128 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
161 unsigned long d0, d1, d2, d3, d6, d7; 129 unsigned long d0, d1, d2, d3, d6, d7;
162 unsigned long sp; 130 unsigned long sp;
163 unsigned short ss, gs; 131 unsigned short ss, gs;
132 const char *board;
164 133
165 if (user_mode_vm(regs)) { 134 if (user_mode_vm(regs)) {
166 sp = regs->sp; 135 sp = regs->sp;
@@ -173,11 +142,15 @@ void __show_registers(struct pt_regs *regs, int all)
173 } 142 }
174 143
175 printk("\n"); 144 printk("\n");
176 printk("Pid: %d, comm: %s %s (%s %.*s)\n", 145
146 board = dmi_get_system_info(DMI_PRODUCT_NAME);
147 if (!board)
148 board = "";
149 printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
177 task_pid_nr(current), current->comm, 150 task_pid_nr(current), current->comm,
178 print_tainted(), init_utsname()->release, 151 print_tainted(), init_utsname()->release,
179 (int)strcspn(init_utsname()->version, " "), 152 (int)strcspn(init_utsname()->version, " "),
180 init_utsname()->version); 153 init_utsname()->version, board);
181 154
182 printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", 155 printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
183 (u16)regs->cs, regs->ip, regs->flags, 156 (u16)regs->cs, regs->ip, regs->flags,
@@ -216,7 +189,7 @@ void __show_registers(struct pt_regs *regs, int all)
216 189
217void show_regs(struct pt_regs *regs) 190void show_regs(struct pt_regs *regs)
218{ 191{
219 __show_registers(regs, 1); 192 __show_regs(regs, 1);
220 show_trace(NULL, regs, &regs->sp, regs->bp); 193 show_trace(NULL, regs, &regs->sp, regs->bp);
221} 194}
222 195
@@ -277,6 +250,14 @@ void exit_thread(void)
277 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; 250 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
278 put_cpu(); 251 put_cpu();
279 } 252 }
253#ifdef CONFIG_X86_DS
254 /* Free any DS contexts that have not been properly released. */
255 if (unlikely(current->thread.ds_ctx)) {
256 /* we clear debugctl to make sure DS is not used. */
257 update_debugctlmsr(0);
258 ds_free(current->thread.ds_ctx);
259 }
260#endif /* CONFIG_X86_DS */
280} 261}
281 262
282void flush_thread(void) 263void flush_thread(void)
@@ -438,6 +419,35 @@ int set_tsc_mode(unsigned int val)
438 return 0; 419 return 0;
439} 420}
440 421
422#ifdef CONFIG_X86_DS
423static int update_debugctl(struct thread_struct *prev,
424 struct thread_struct *next, unsigned long debugctl)
425{
426 unsigned long ds_prev = 0;
427 unsigned long ds_next = 0;
428
429 if (prev->ds_ctx)
430 ds_prev = (unsigned long)prev->ds_ctx->ds;
431 if (next->ds_ctx)
432 ds_next = (unsigned long)next->ds_ctx->ds;
433
434 if (ds_next != ds_prev) {
435 /* we clear debugctl to make sure DS
436 * is not in use when we change it */
437 debugctl = 0;
438 update_debugctlmsr(0);
439 wrmsr(MSR_IA32_DS_AREA, ds_next, 0);
440 }
441 return debugctl;
442}
443#else
444static int update_debugctl(struct thread_struct *prev,
445 struct thread_struct *next, unsigned long debugctl)
446{
447 return debugctl;
448}
449#endif /* CONFIG_X86_DS */
450
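
The factored-out update_debugctl() keeps __switch_to_xtra() free of #ifdefs (the !CONFIG_X86_DS stub is a pass-through) and preserves the ordering requirement the old open-coded version had: tracing must be quiesced before MSR_IA32_DS_AREA is redirected, or the hardware could log into the wrong buffer. The invariant, as a sketch:

        update_debugctlmsr(0);                  /* 1: stop BTS/PEBS logging */
        wrmsr(MSR_IA32_DS_AREA, ds_next, 0);    /* 2: switch the DS buffer  */
        /* 3: caller re-arms DEBUGCTL with next->debugctlmsr afterwards */
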
441static noinline void 451static noinline void
442__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, 452__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
443 struct tss_struct *tss) 453 struct tss_struct *tss)
@@ -448,14 +458,7 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
448 prev = &prev_p->thread; 458 prev = &prev_p->thread;
449 next = &next_p->thread; 459 next = &next_p->thread;
450 460
451 debugctl = prev->debugctlmsr; 461 debugctl = update_debugctl(prev, next, prev->debugctlmsr);
452 if (next->ds_area_msr != prev->ds_area_msr) {
453 /* we clear debugctl to make sure DS
454 * is not in use when we change it */
455 debugctl = 0;
456 update_debugctlmsr(0);
457 wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
458 }
459 462
460 if (next->debugctlmsr != debugctl) 463 if (next->debugctlmsr != debugctl)
461 update_debugctlmsr(next->debugctlmsr); 464 update_debugctlmsr(next->debugctlmsr);
@@ -479,13 +482,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
479 hard_enable_TSC(); 482 hard_enable_TSC();
480 } 483 }
481 484
482#ifdef X86_BTS 485#ifdef CONFIG_X86_PTRACE_BTS
483 if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) 486 if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
484 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); 487 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
485 488
486 if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) 489 if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
487 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); 490 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
488#endif 491#endif /* CONFIG_X86_PTRACE_BTS */
489 492
490 493
491 if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { 494 if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3e3d503eadcf..c958120fb1b6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,11 +37,11 @@
37#include <linux/kdebug.h> 37#include <linux/kdebug.h>
38#include <linux/tick.h> 38#include <linux/tick.h>
39#include <linux/prctl.h> 39#include <linux/prctl.h>
40#include <linux/uaccess.h>
41#include <linux/io.h>
40 42
41#include <asm/uaccess.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
43#include <asm/system.h> 44#include <asm/system.h>
44#include <asm/io.h>
45#include <asm/processor.h> 45#include <asm/processor.h>
46#include <asm/i387.h> 46#include <asm/i387.h>
47#include <asm/mmu_context.h> 47#include <asm/mmu_context.h>
@@ -51,6 +51,7 @@
51#include <asm/proto.h> 51#include <asm/proto.h>
52#include <asm/ia32.h> 52#include <asm/ia32.h>
53#include <asm/idle.h> 53#include <asm/idle.h>
54#include <asm/syscalls.h>
54 55
55asmlinkage extern void ret_from_fork(void); 56asmlinkage extern void ret_from_fork(void);
56 57
@@ -92,30 +93,12 @@ void exit_idle(void)
92 __exit_idle(); 93 __exit_idle();
93} 94}
94 95
95#ifdef CONFIG_HOTPLUG_CPU 96#ifndef CONFIG_SMP
96DECLARE_PER_CPU(int, cpu_state);
97
98#include <asm/nmi.h>
99/* We halt the CPU with physical CPU hotplug */
100static inline void play_dead(void)
101{
102 idle_task_exit();
103 c1e_remove_cpu(raw_smp_processor_id());
104
105 mb();
106 /* Ack it */
107 __get_cpu_var(cpu_state) = CPU_DEAD;
108
109 local_irq_disable();
110 /* mask all interrupts, flush any and all caches, and halt */
111 wbinvd_halt();
112}
113#else
114static inline void play_dead(void) 97static inline void play_dead(void)
115{ 98{
116 BUG(); 99 BUG();
117} 100}
118#endif /* CONFIG_HOTPLUG_CPU */ 101#endif
119 102
120/* 103/*
121 * The idle thread. There's no useful work to be 104 * The idle thread. There's no useful work to be
@@ -160,7 +143,7 @@ void cpu_idle(void)
160} 143}
161 144
162/* Prints also some state that isn't saved in the pt_regs */ 145/* Prints also some state that isn't saved in the pt_regs */
163void __show_regs(struct pt_regs * regs) 146void __show_regs(struct pt_regs *regs, int all)
164{ 147{
165 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; 148 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
166 unsigned long d0, d1, d2, d3, d6, d7; 149 unsigned long d0, d1, d2, d3, d6, d7;
@@ -169,60 +152,65 @@ void __show_regs(struct pt_regs * regs)
169 152
170 printk("\n"); 153 printk("\n");
171 print_modules(); 154 print_modules();
172 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 155 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
173 current->pid, current->comm, print_tainted(), 156 current->pid, current->comm, print_tainted(),
174 init_utsname()->release, 157 init_utsname()->release,
175 (int)strcspn(init_utsname()->version, " "), 158 (int)strcspn(init_utsname()->version, " "),
176 init_utsname()->version); 159 init_utsname()->version);
177 printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); 160 printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
178 printk_address(regs->ip, 1); 161 printk_address(regs->ip, 1);
179 printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp, 162 printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
180 regs->flags); 163 regs->sp, regs->flags);
181 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", 164 printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
182 regs->ax, regs->bx, regs->cx); 165 regs->ax, regs->bx, regs->cx);
183 printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", 166 printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
184 regs->dx, regs->si, regs->di); 167 regs->dx, regs->si, regs->di);
185 printk("RBP: %016lx R08: %016lx R09: %016lx\n", 168 printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
186 regs->bp, regs->r8, regs->r9); 169 regs->bp, regs->r8, regs->r9);
187 printk("R10: %016lx R11: %016lx R12: %016lx\n", 170 printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
188 regs->r10, regs->r11, regs->r12); 171 regs->r10, regs->r11, regs->r12);
189 printk("R13: %016lx R14: %016lx R15: %016lx\n", 172 printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
190 regs->r13, regs->r14, regs->r15); 173 regs->r13, regs->r14, regs->r15);
191 174
192 asm("movl %%ds,%0" : "=r" (ds)); 175 asm("movl %%ds,%0" : "=r" (ds));
193 asm("movl %%cs,%0" : "=r" (cs)); 176 asm("movl %%cs,%0" : "=r" (cs));
194 asm("movl %%es,%0" : "=r" (es)); 177 asm("movl %%es,%0" : "=r" (es));
195 asm("movl %%fs,%0" : "=r" (fsindex)); 178 asm("movl %%fs,%0" : "=r" (fsindex));
196 asm("movl %%gs,%0" : "=r" (gsindex)); 179 asm("movl %%gs,%0" : "=r" (gsindex));
197 180
198 rdmsrl(MSR_FS_BASE, fs); 181 rdmsrl(MSR_FS_BASE, fs);
199 rdmsrl(MSR_GS_BASE, gs); 182 rdmsrl(MSR_GS_BASE, gs);
200 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 183 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
184
185 if (!all)
186 return;
201 187
202 cr0 = read_cr0(); 188 cr0 = read_cr0();
203 cr2 = read_cr2(); 189 cr2 = read_cr2();
204 cr3 = read_cr3(); 190 cr3 = read_cr3();
205 cr4 = read_cr4(); 191 cr4 = read_cr4();
206 192
207 printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 193 printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
208 fs,fsindex,gs,gsindex,shadowgs); 194 fs, fsindex, gs, gsindex, shadowgs);
209 printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); 195 printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
210 printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); 196 es, cr0);
197 printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
198 cr4);
211 199
212 get_debugreg(d0, 0); 200 get_debugreg(d0, 0);
213 get_debugreg(d1, 1); 201 get_debugreg(d1, 1);
214 get_debugreg(d2, 2); 202 get_debugreg(d2, 2);
215 printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); 203 printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
216 get_debugreg(d3, 3); 204 get_debugreg(d3, 3);
217 get_debugreg(d6, 6); 205 get_debugreg(d6, 6);
218 get_debugreg(d7, 7); 206 get_debugreg(d7, 7);
219 printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); 207 printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
220} 208}
221 209
222void show_regs(struct pt_regs *regs) 210void show_regs(struct pt_regs *regs)
223{ 211{
224 printk("CPU %d:", smp_processor_id()); 212 printk(KERN_INFO "CPU %d:", smp_processor_id());
225 __show_regs(regs); 213 __show_regs(regs, 1);
226 show_trace(NULL, regs, (void *)(regs + 1), regs->bp); 214 show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
227} 215}
228 216
@@ -247,6 +235,14 @@ void exit_thread(void)
247 t->io_bitmap_max = 0; 235 t->io_bitmap_max = 0;
248 put_cpu(); 236 put_cpu();
249 } 237 }
238#ifdef CONFIG_X86_DS
239 /* Free any DS contexts that have not been properly released. */
240 if (unlikely(t->ds_ctx)) {
241 /* we clear debugctl to make sure DS is not used. */
242 update_debugctlmsr(0);
243 ds_free(t->ds_ctx);
244 }
245#endif /* CONFIG_X86_DS */
250} 246}
251 247
252void flush_thread(void) 248void flush_thread(void)
@@ -322,10 +318,10 @@ void prepare_to_copy(struct task_struct *tsk)
322 318
323int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, 319int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
324 unsigned long unused, 320 unsigned long unused,
325 struct task_struct * p, struct pt_regs * regs) 321 struct task_struct *p, struct pt_regs *regs)
326{ 322{
327 int err; 323 int err;
328 struct pt_regs * childregs; 324 struct pt_regs *childregs;
329 struct task_struct *me = current; 325 struct task_struct *me = current;
330 326
331 childregs = ((struct pt_regs *) 327 childregs = ((struct pt_regs *)
@@ -370,10 +366,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
370 if (test_thread_flag(TIF_IA32)) 366 if (test_thread_flag(TIF_IA32))
371 err = do_set_thread_area(p, -1, 367 err = do_set_thread_area(p, -1,
372 (struct user_desc __user *)childregs->si, 0); 368 (struct user_desc __user *)childregs->si, 0);
373 else 369 else
374#endif 370#endif
375 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 371 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
376 if (err) 372 if (err)
377 goto out; 373 goto out;
378 } 374 }
379 err = 0; 375 err = 0;
@@ -480,13 +476,27 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
480 next = &next_p->thread; 476 next = &next_p->thread;
481 477
482 debugctl = prev->debugctlmsr; 478 debugctl = prev->debugctlmsr;
483 if (next->ds_area_msr != prev->ds_area_msr) { 479
484 /* we clear debugctl to make sure DS 480#ifdef CONFIG_X86_DS
485 * is not in use when we change it */ 481 {
486 debugctl = 0; 482 unsigned long ds_prev = 0, ds_next = 0;
487 update_debugctlmsr(0); 483
488 wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr); 484 if (prev->ds_ctx)
485 ds_prev = (unsigned long)prev->ds_ctx->ds;
486 if (next->ds_ctx)
487 ds_next = (unsigned long)next->ds_ctx->ds;
488
489 if (ds_next != ds_prev) {
490 /*
491 * We clear debugctl to make sure DS
492 * is not in use when we change it:
493 */
494 debugctl = 0;
495 update_debugctlmsr(0);
496 wrmsrl(MSR_IA32_DS_AREA, ds_next);
497 }
489 } 498 }
499#endif /* CONFIG_X86_DS */
490 500
491 if (next->debugctlmsr != debugctl) 501 if (next->debugctlmsr != debugctl)
492 update_debugctlmsr(next->debugctlmsr); 502 update_debugctlmsr(next->debugctlmsr);
@@ -524,13 +534,13 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
524 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); 534 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
525 } 535 }
526 536
527#ifdef X86_BTS 537#ifdef CONFIG_X86_PTRACE_BTS
528 if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) 538 if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
529 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); 539 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
530 540
531 if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) 541 if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
532 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); 542 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
533#endif 543#endif /* CONFIG_X86_PTRACE_BTS */
534} 544}
535 545
536/* 546/*
@@ -552,7 +562,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
552 unsigned fsindex, gsindex; 562 unsigned fsindex, gsindex;
553 563
554 /* we're going to use this soon, after a few expensive things */ 564 /* we're going to use this soon, after a few expensive things */
555 if (next_p->fpu_counter>5) 565 if (next_p->fpu_counter > 5)
556 prefetch(next->xstate); 566 prefetch(next->xstate);
557 567
558 /* 568 /*
@@ -560,13 +570,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
560 */ 570 */
561 load_sp0(tss, next); 571 load_sp0(tss, next);
562 572
563 /* 573 /*
564 * Switch DS and ES. 574 * Switch DS and ES.
565 * This won't pick up thread selector changes, but I guess that is ok. 575 * This won't pick up thread selector changes, but I guess that is ok.
566 */ 576 */
567 savesegment(es, prev->es); 577 savesegment(es, prev->es);
568 if (unlikely(next->es | prev->es)) 578 if (unlikely(next->es | prev->es))
569 loadsegment(es, next->es); 579 loadsegment(es, next->es);
570 580
571 savesegment(ds, prev->ds); 581 savesegment(ds, prev->ds);
572 if (unlikely(next->ds | prev->ds)) 582 if (unlikely(next->ds | prev->ds))
@@ -592,7 +602,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
592 */ 602 */
593 arch_leave_lazy_cpu_mode(); 603 arch_leave_lazy_cpu_mode();
594 604
595 /* 605 /*
596 * Switch FS and GS. 606 * Switch FS and GS.
597 * 607 *
598 * Segment register != 0 always requires a reload. Also 608 * Segment register != 0 always requires a reload. Also
@@ -601,13 +611,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
601 */ 611 */
602 if (unlikely(fsindex | next->fsindex | prev->fs)) { 612 if (unlikely(fsindex | next->fsindex | prev->fs)) {
603 loadsegment(fs, next->fsindex); 613 loadsegment(fs, next->fsindex);
604 /* 614 /*
605 * Check if the user used a selector != 0; if yes 615 * Check if the user used a selector != 0; if yes
606 * clear 64bit base, since overloaded base is always 616 * clear 64bit base, since overloaded base is always
607 * mapped to the Null selector 617 * mapped to the Null selector
608 */ 618 */
609 if (fsindex) 619 if (fsindex)
610 prev->fs = 0; 620 prev->fs = 0;
611 } 621 }
612 /* when next process has a 64bit base use it */ 622 /* when next process has a 64bit base use it */
613 if (next->fs) 623 if (next->fs)
@@ -617,7 +627,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
617 if (unlikely(gsindex | next->gsindex | prev->gs)) { 627 if (unlikely(gsindex | next->gsindex | prev->gs)) {
618 load_gs_index(next->gsindex); 628 load_gs_index(next->gsindex);
619 if (gsindex) 629 if (gsindex)
620 prev->gs = 0; 630 prev->gs = 0;
621 } 631 }
622 if (next->gs) 632 if (next->gs)
623 wrmsrl(MSR_KERNEL_GS_BASE, next->gs); 633 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
@@ -626,12 +636,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
626 /* Must be after DS reload */ 636 /* Must be after DS reload */
627 unlazy_fpu(prev_p); 637 unlazy_fpu(prev_p);
628 638
629 /* 639 /*
630 * Switch the PDA and FPU contexts. 640 * Switch the PDA and FPU contexts.
631 */ 641 */
632 prev->usersp = read_pda(oldrsp); 642 prev->usersp = read_pda(oldrsp);
633 write_pda(oldrsp, next->usersp); 643 write_pda(oldrsp, next->usersp);
634 write_pda(pcurrent, next_p); 644 write_pda(pcurrent, next_p);
635 645
636 write_pda(kernelstack, 646 write_pda(kernelstack,
637 (unsigned long)task_stack_page(next_p) + 647 (unsigned long)task_stack_page(next_p) +
@@ -672,7 +682,7 @@ long sys_execve(char __user *name, char __user * __user *argv,
672 char __user * __user *envp, struct pt_regs *regs) 682 char __user * __user *envp, struct pt_regs *regs)
673{ 683{
674 long error; 684 long error;
675 char * filename; 685 char *filename;
676 686
677 filename = getname(name); 687 filename = getname(name);
678 error = PTR_ERR(filename); 688 error = PTR_ERR(filename);
@@ -730,55 +740,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs)
730unsigned long get_wchan(struct task_struct *p) 740unsigned long get_wchan(struct task_struct *p)
731{ 741{
732 unsigned long stack; 742 unsigned long stack;
733 u64 fp,ip; 743 u64 fp, ip;
734 int count = 0; 744 int count = 0;
735 745
736 if (!p || p == current || p->state==TASK_RUNNING) 746 if (!p || p == current || p->state == TASK_RUNNING)
737 return 0; 747 return 0;
738 stack = (unsigned long)task_stack_page(p); 748 stack = (unsigned long)task_stack_page(p);
739 if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) 749 if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
740 return 0; 750 return 0;
741 fp = *(u64 *)(p->thread.sp); 751 fp = *(u64 *)(p->thread.sp);
742 do { 752 do {
743 if (fp < (unsigned long)stack || 753 if (fp < (unsigned long)stack ||
744 fp > (unsigned long)stack+THREAD_SIZE) 754 fp >= (unsigned long)stack+THREAD_SIZE)
745 return 0; 755 return 0;
746 ip = *(u64 *)(fp+8); 756 ip = *(u64 *)(fp+8);
747 if (!in_sched_functions(ip)) 757 if (!in_sched_functions(ip))
748 return ip; 758 return ip;
749 fp = *(u64 *)fp; 759 fp = *(u64 *)fp;
750 } while (count++ < 16); 760 } while (count++ < 16);
751 return 0; 761 return 0;
752} 762}
753 763
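Note the two comparisons tightened from > to >=: a saved sp or frame pointer equal to stack + THREAD_SIZE already points one past the stack, so the upper bound must be exclusive. As a self-contained sketch of the corrected check:

        static int on_task_stack(unsigned long fp, unsigned long stack)
        {
                return fp >= stack && fp < stack + THREAD_SIZE;
        }
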
754long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) 764long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
755{ 765{
756 int ret = 0; 766 int ret = 0;
757 int doit = task == current; 767 int doit = task == current;
758 int cpu; 768 int cpu;
759 769
760 switch (code) { 770 switch (code) {
761 case ARCH_SET_GS: 771 case ARCH_SET_GS:
762 if (addr >= TASK_SIZE_OF(task)) 772 if (addr >= TASK_SIZE_OF(task))
763 return -EPERM; 773 return -EPERM;
764 cpu = get_cpu(); 774 cpu = get_cpu();
765 /* handle small bases via the GDT because that's faster to 775 /* handle small bases via the GDT because that's faster to
766 switch. */ 776 switch. */
767 if (addr <= 0xffffffff) { 777 if (addr <= 0xffffffff) {
768 set_32bit_tls(task, GS_TLS, addr); 778 set_32bit_tls(task, GS_TLS, addr);
769 if (doit) { 779 if (doit) {
770 load_TLS(&task->thread, cpu); 780 load_TLS(&task->thread, cpu);
771 load_gs_index(GS_TLS_SEL); 781 load_gs_index(GS_TLS_SEL);
772 } 782 }
773 task->thread.gsindex = GS_TLS_SEL; 783 task->thread.gsindex = GS_TLS_SEL;
774 task->thread.gs = 0; 784 task->thread.gs = 0;
775 } else { 785 } else {
776 task->thread.gsindex = 0; 786 task->thread.gsindex = 0;
777 task->thread.gs = addr; 787 task->thread.gs = addr;
778 if (doit) { 788 if (doit) {
779 load_gs_index(0); 789 load_gs_index(0);
780 ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); 790 ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
781 } 791 }
782 } 792 }
783 put_cpu(); 793 put_cpu();
784 break; 794 break;
@@ -832,8 +842,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
832 rdmsrl(MSR_KERNEL_GS_BASE, base); 842 rdmsrl(MSR_KERNEL_GS_BASE, base);
833 else 843 else
834 base = task->thread.gs; 844 base = task->thread.gs;
835 } 845 } else
836 else
837 base = task->thread.gs; 846 base = task->thread.gs;
838 ret = put_user(base, (unsigned long __user *)addr); 847 ret = put_user(base, (unsigned long __user *)addr);
839 break; 848 break;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index e37dccce85db..0a6d8c12e10d 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -14,6 +14,7 @@
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/ptrace.h> 15#include <linux/ptrace.h>
16#include <linux/regset.h> 16#include <linux/regset.h>
17#include <linux/tracehook.h>
17#include <linux/user.h> 18#include <linux/user.h>
18#include <linux/elf.h> 19#include <linux/elf.h>
19#include <linux/security.h> 20#include <linux/security.h>
@@ -39,7 +40,9 @@ enum x86_regset {
39 REGSET_GENERAL, 40 REGSET_GENERAL,
40 REGSET_FP, 41 REGSET_FP,
41 REGSET_XFP, 42 REGSET_XFP,
43 REGSET_IOPERM64 = REGSET_XFP,
42 REGSET_TLS, 44 REGSET_TLS,
45 REGSET_IOPERM32,
43}; 46};
44 47
45/* 48/*
@@ -69,7 +72,7 @@ static inline bool invalid_selector(u16 value)
69 72
70#define FLAG_MASK FLAG_MASK_32 73#define FLAG_MASK FLAG_MASK_32
71 74
72static long *pt_regs_access(struct pt_regs *regs, unsigned long regno) 75static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
73{ 76{
74 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); 77 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
75 regno >>= 2; 78 regno >>= 2;
@@ -554,45 +557,138 @@ static int ptrace_set_debugreg(struct task_struct *child,
554 return 0; 557 return 0;
555} 558}
556 559
557#ifdef X86_BTS 560/*
561 * These access the current or another (stopped) task's io permission
562 * bitmap for debugging or core dump.
563 */
564static int ioperm_active(struct task_struct *target,
565 const struct user_regset *regset)
566{
567 return target->thread.io_bitmap_max / regset->size;
568}
558 569
559static int ptrace_bts_get_size(struct task_struct *child) 570static int ioperm_get(struct task_struct *target,
571 const struct user_regset *regset,
572 unsigned int pos, unsigned int count,
573 void *kbuf, void __user *ubuf)
560{ 574{
561 if (!child->thread.ds_area_msr) 575 if (!target->thread.io_bitmap_ptr)
562 return -ENXIO; 576 return -ENXIO;
563 577
564 return ds_get_bts_index((void *)child->thread.ds_area_msr); 578 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
579 target->thread.io_bitmap_ptr,
580 0, IO_BITMAP_BYTES);
581}
582
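ioperm_active() reports how many regset slots are live (the task's io_bitmap_max in regset-sized units) so core-dump code knows whether to emit the note at all; ioperm_get() then copies the bitmap with user_regset_copyout(), whose presumed signature (linux/regset.h) handles either a kernel or a user destination buffer:

        int user_regset_copyout(unsigned int *pos, unsigned int *count,
                                void **kbuf, void __user **ubuf,
                                const void *data,
                                unsigned int start_offset,
                                unsigned int end_offset);
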
583#ifdef CONFIG_X86_PTRACE_BTS
584/*
585 * The configuration for a particular BTS hardware implementation.
586 */
587struct bts_configuration {
588 /* the size of a BTS record in bytes; at most BTS_MAX_RECORD_SIZE */
589 unsigned char sizeof_bts;
590 /* the size of a field in the BTS record in bytes */
591 unsigned char sizeof_field;
592 /* a bitmask to enable/disable BTS in DEBUGCTL MSR */
593 unsigned long debugctl_mask;
594};
595static struct bts_configuration bts_cfg;
596
597#define BTS_MAX_RECORD_SIZE (8 * 3)
598
599
600/*
601 * Branch Trace Store (BTS) uses the following format. Different
602 * architectures vary in the size of those fields.
603 * - source linear address
604 * - destination linear address
605 * - flags
606 *
607 * Later architectures use 64bit pointers throughout, whereas earlier
608 * architectures use 32bit pointers in 32bit mode.
609 *
610 * We compute the base address for the first 8 fields based on:
611 * - the field size stored in the DS configuration
612 * - the relative field position
613 *
614 * In order to store additional information in the BTS buffer, we use
615 * a special source address to indicate that the record requires
616 * special interpretation.
617 *
618 * Netburst indicated via a bit in the flags field whether the branch
619 * was predicted; this is ignored.
620 */
621
622enum bts_field {
623 bts_from = 0,
624 bts_to,
625 bts_flags,
626
627 bts_escape = (unsigned long)-1,
628 bts_qual = bts_to,
629 bts_jiffies = bts_flags
630};
631
632static inline unsigned long bts_get(const char *base, enum bts_field field)
633{
634 base += (bts_cfg.sizeof_field * field);
635 return *(unsigned long *)base;
636}
637
638static inline void bts_set(char *base, enum bts_field field, unsigned long val)
639{
 640 base += (bts_cfg.sizeof_field * field);
641 (*(unsigned long *)base) = val;
642}
643
644/*
645 * Translate a BTS record from the raw format into the bts_struct format
646 *
647 * out (out): bts_struct interpretation
648 * raw: raw BTS record
649 */
650static void ptrace_bts_translate_record(struct bts_struct *out, const void *raw)
651{
652 memset(out, 0, sizeof(*out));
653 if (bts_get(raw, bts_from) == bts_escape) {
654 out->qualifier = bts_get(raw, bts_qual);
655 out->variant.jiffies = bts_get(raw, bts_jiffies);
656 } else {
657 out->qualifier = BTS_BRANCH;
658 out->variant.lbr.from_ip = bts_get(raw, bts_from);
659 out->variant.lbr.to_ip = bts_get(raw, bts_to);
660 }
565} 661}
566 662
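With the Core2 configuration defined below (sizeof_field == 8), field n of a raw record lives at byte offset 8 * n. A worked example of the escape encoding that ptrace_bts_write_record() emits for scheduling timestamps:

        unsigned char rec[8 * 3];

        bts_set(rec, bts_from, bts_escape);       /* bytes  0..7: escape marker */
        bts_set(rec, bts_qual, BTS_TASK_ARRIVES); /* bytes  8..15: qualifier    */
        bts_set(rec, bts_jiffies, jiffies);       /* bytes 16..23: timestamp    */
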
567static int ptrace_bts_read_record(struct task_struct *child, 663static int ptrace_bts_read_record(struct task_struct *child, size_t index,
568 long index,
569 struct bts_struct __user *out) 664 struct bts_struct __user *out)
570{ 665{
571 struct bts_struct ret; 666 struct bts_struct ret;
572 int retval; 667 const void *bts_record;
573 int bts_end; 668 size_t bts_index, bts_end;
574 int bts_index; 669 int error;
575
576 if (!child->thread.ds_area_msr)
577 return -ENXIO;
578 670
579 if (index < 0) 671 error = ds_get_bts_end(child, &bts_end);
580 return -EINVAL; 672 if (error < 0)
673 return error;
581 674
582 bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr);
583 if (bts_end <= index) 675 if (bts_end <= index)
584 return -EINVAL; 676 return -EINVAL;
585 677
678 error = ds_get_bts_index(child, &bts_index);
679 if (error < 0)
680 return error;
681
586 /* translate the ptrace bts index into the ds bts index */ 682 /* translate the ptrace bts index into the ds bts index */
587 bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr); 683 bts_index += bts_end - (index + 1);
588 bts_index -= (index + 1); 684 if (bts_end <= bts_index)
589 if (bts_index < 0) 685 bts_index -= bts_end;
590 bts_index += bts_end; 686
687 error = ds_access_bts(child, bts_index, &bts_record);
688 if (error < 0)
689 return error;
591 690
592 retval = ds_read_bts((void *)child->thread.ds_area_msr, 691 ptrace_bts_translate_record(&ret, bts_record);
593 bts_index, &ret);
594 if (retval < 0)
595 return retval;
596 692
597 if (copy_to_user(out, &ret, sizeof(ret))) 693 if (copy_to_user(out, &ret, sizeof(ret)))
598 return -EFAULT; 694 return -EFAULT;
@@ -600,101 +696,106 @@ static int ptrace_bts_read_record(struct task_struct *child,
600 return sizeof(ret); 696 return sizeof(ret);
601} 697}
602 698
603static int ptrace_bts_clear(struct task_struct *child)
604{
605 if (!child->thread.ds_area_msr)
606 return -ENXIO;
607
608 return ds_clear((void *)child->thread.ds_area_msr);
609}
610
611static int ptrace_bts_drain(struct task_struct *child, 699static int ptrace_bts_drain(struct task_struct *child,
612 long size, 700 long size,
613 struct bts_struct __user *out) 701 struct bts_struct __user *out)
614{ 702{
615 int end, i; 703 struct bts_struct ret;
616 void *ds = (void *)child->thread.ds_area_msr; 704 const unsigned char *raw;
617 705 size_t end, i;
618 if (!ds) 706 int error;
619 return -ENXIO;
620 707
621 end = ds_get_bts_index(ds); 708 error = ds_get_bts_index(child, &end);
622 if (end <= 0) 709 if (error < 0)
623 return end; 710 return error;
624 711
625 if (size < (end * sizeof(struct bts_struct))) 712 if (size < (end * sizeof(struct bts_struct)))
626 return -EIO; 713 return -EIO;
627 714
628 for (i = 0; i < end; i++, out++) { 715 error = ds_access_bts(child, 0, (const void **)&raw);
629 struct bts_struct ret; 716 if (error < 0)
630 int retval; 717 return error;
631 718
632 retval = ds_read_bts(ds, i, &ret); 719 for (i = 0; i < end; i++, out++, raw += bts_cfg.sizeof_bts) {
633 if (retval < 0) 720 ptrace_bts_translate_record(&ret, raw);
634 return retval;
635 721
636 if (copy_to_user(out, &ret, sizeof(ret))) 722 if (copy_to_user(out, &ret, sizeof(ret)))
637 return -EFAULT; 723 return -EFAULT;
638 } 724 }
639 725
640 ds_clear(ds); 726 error = ds_clear_bts(child);
727 if (error < 0)
728 return error;
641 729
642 return end; 730 return end;
643} 731}
644 732
733static void ptrace_bts_ovfl(struct task_struct *child)
734{
735 send_sig(child->thread.bts_ovfl_signal, child, 0);
736}
737
645static int ptrace_bts_config(struct task_struct *child, 738static int ptrace_bts_config(struct task_struct *child,
646 long cfg_size, 739 long cfg_size,
647 const struct ptrace_bts_config __user *ucfg) 740 const struct ptrace_bts_config __user *ucfg)
648{ 741{
649 struct ptrace_bts_config cfg; 742 struct ptrace_bts_config cfg;
650 int bts_size, ret = 0; 743 int error = 0;
651 void *ds;
652 744
745 error = -EOPNOTSUPP;
746 if (!bts_cfg.sizeof_bts)
747 goto errout;
748
749 error = -EIO;
653 if (cfg_size < sizeof(cfg)) 750 if (cfg_size < sizeof(cfg))
654 return -EIO; 751 goto errout;
655 752
753 error = -EFAULT;
656 if (copy_from_user(&cfg, ucfg, sizeof(cfg))) 754 if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
657 return -EFAULT; 755 goto errout;
658 756
659 if ((int)cfg.size < 0) 757 error = -EINVAL;
660 return -EINVAL; 758 if ((cfg.flags & PTRACE_BTS_O_SIGNAL) &&
759 !(cfg.flags & PTRACE_BTS_O_ALLOC))
760 goto errout;
661 761
662 bts_size = 0; 762 if (cfg.flags & PTRACE_BTS_O_ALLOC) {
663 ds = (void *)child->thread.ds_area_msr; 763 ds_ovfl_callback_t ovfl = NULL;
664 if (ds) { 764 unsigned int sig = 0;
665 bts_size = ds_get_bts_size(ds); 765
666 if (bts_size < 0) 766 /* we ignore the error in case we were not tracing child */
667 return bts_size; 767 (void)ds_release_bts(child);
668 } 768
669 cfg.size = PAGE_ALIGN(cfg.size); 769 if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
770 if (!cfg.signal)
771 goto errout;
772
773 sig = cfg.signal;
774 ovfl = ptrace_bts_ovfl;
775 }
670 776
671 if (bts_size != cfg.size) { 777 error = ds_request_bts(child, /* base = */ NULL, cfg.size, ovfl);
672 ret = ptrace_bts_realloc(child, cfg.size, 778 if (error < 0)
673 cfg.flags & PTRACE_BTS_O_CUT_SIZE);
674 if (ret < 0)
675 goto errout; 779 goto errout;
676 780
677 ds = (void *)child->thread.ds_area_msr; 781 child->thread.bts_ovfl_signal = sig;
678 } 782 }
679 783
680 if (cfg.flags & PTRACE_BTS_O_SIGNAL) 784 error = -EINVAL;
681 ret = ds_set_overflow(ds, DS_O_SIGNAL); 785 if (!child->thread.ds_ctx && cfg.flags)
682 else
683 ret = ds_set_overflow(ds, DS_O_WRAP);
684 if (ret < 0)
685 goto errout; 786 goto errout;
686 787
687 if (cfg.flags & PTRACE_BTS_O_TRACE) 788 if (cfg.flags & PTRACE_BTS_O_TRACE)
688 child->thread.debugctlmsr |= ds_debugctl_mask(); 789 child->thread.debugctlmsr |= bts_cfg.debugctl_mask;
689 else 790 else
690 child->thread.debugctlmsr &= ~ds_debugctl_mask(); 791 child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
691 792
692 if (cfg.flags & PTRACE_BTS_O_SCHED) 793 if (cfg.flags & PTRACE_BTS_O_SCHED)
693 set_tsk_thread_flag(child, TIF_BTS_TRACE_TS); 794 set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
694 else 795 else
695 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); 796 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
696 797
697 ret = sizeof(cfg); 798 error = sizeof(cfg);
698 799
699out: 800out:
700 if (child->thread.debugctlmsr) 801 if (child->thread.debugctlmsr)
@@ -702,10 +803,10 @@ out:
702 else 803 else
703 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); 804 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
704 805
705 return ret; 806 return error;
706 807
707errout: 808errout:
708 child->thread.debugctlmsr &= ~ds_debugctl_mask(); 809 child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
709 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); 810 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
710 goto out; 811 goto out;
711} 812}
@@ -714,29 +815,40 @@ static int ptrace_bts_status(struct task_struct *child,
714 long cfg_size, 815 long cfg_size,
715 struct ptrace_bts_config __user *ucfg) 816 struct ptrace_bts_config __user *ucfg)
716{ 817{
717 void *ds = (void *)child->thread.ds_area_msr;
718 struct ptrace_bts_config cfg; 818 struct ptrace_bts_config cfg;
819 size_t end;
820 const void *base, *max;
821 int error;
719 822
720 if (cfg_size < sizeof(cfg)) 823 if (cfg_size < sizeof(cfg))
721 return -EIO; 824 return -EIO;
722 825
723 memset(&cfg, 0, sizeof(cfg)); 826 error = ds_get_bts_end(child, &end);
827 if (error < 0)
828 return error;
724 829
725 if (ds) { 830 error = ds_access_bts(child, /* index = */ 0, &base);
726 cfg.size = ds_get_bts_size(ds); 831 if (error < 0)
832 return error;
727 833
728 if (ds_get_overflow(ds) == DS_O_SIGNAL) 834 error = ds_access_bts(child, /* index = */ end, &max);
729 cfg.flags |= PTRACE_BTS_O_SIGNAL; 835 if (error < 0)
836 return error;
730 837
731 if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) && 838 memset(&cfg, 0, sizeof(cfg));
732 child->thread.debugctlmsr & ds_debugctl_mask()) 839 cfg.size = (max - base);
733 cfg.flags |= PTRACE_BTS_O_TRACE; 840 cfg.signal = child->thread.bts_ovfl_signal;
841 cfg.bts_size = sizeof(struct bts_struct);
734 842
735 if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS)) 843 if (cfg.signal)
736 cfg.flags |= PTRACE_BTS_O_SCHED; 844 cfg.flags |= PTRACE_BTS_O_SIGNAL;
737 }
738 845
739 cfg.bts_size = sizeof(struct bts_struct); 846 if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
847 child->thread.debugctlmsr & bts_cfg.debugctl_mask)
848 cfg.flags |= PTRACE_BTS_O_TRACE;
849
850 if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
851 cfg.flags |= PTRACE_BTS_O_SCHED;
740 852
741 if (copy_to_user(ucfg, &cfg, sizeof(cfg))) 853 if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
742 return -EFAULT; 854 return -EFAULT;
@@ -744,89 +856,38 @@ static int ptrace_bts_status(struct task_struct *child,
744 return sizeof(cfg); 856 return sizeof(cfg);
745} 857}
746 858
747
748static int ptrace_bts_write_record(struct task_struct *child, 859static int ptrace_bts_write_record(struct task_struct *child,
749 const struct bts_struct *in) 860 const struct bts_struct *in)
750{ 861{
751 int retval; 862 unsigned char bts_record[BTS_MAX_RECORD_SIZE];
752 863
753 if (!child->thread.ds_area_msr) 864 BUG_ON(BTS_MAX_RECORD_SIZE < bts_cfg.sizeof_bts);
754 return -ENXIO;
755 865
756 retval = ds_write_bts((void *)child->thread.ds_area_msr, in); 866 memset(bts_record, 0, bts_cfg.sizeof_bts);
757 if (retval) 867 switch (in->qualifier) {
758 return retval; 868 case BTS_INVALID:
869 break;
759 870
760 return sizeof(*in); 871 case BTS_BRANCH:
761} 872 bts_set(bts_record, bts_from, in->variant.lbr.from_ip);
873 bts_set(bts_record, bts_to, in->variant.lbr.to_ip);
874 break;
762 875
763static int ptrace_bts_realloc(struct task_struct *child, 876 case BTS_TASK_ARRIVES:
764 int size, int reduce_size) 877 case BTS_TASK_DEPARTS:
765{ 878 bts_set(bts_record, bts_from, bts_escape);
766 unsigned long rlim, vm; 879 bts_set(bts_record, bts_qual, in->qualifier);
767 int ret, old_size; 880 bts_set(bts_record, bts_jiffies, in->variant.jiffies);
881 break;
768 882
769 if (size < 0) 883 default:
770 return -EINVAL; 884 return -EINVAL;
771
772 old_size = ds_get_bts_size((void *)child->thread.ds_area_msr);
773 if (old_size < 0)
774 return old_size;
775
776 ret = ds_free((void **)&child->thread.ds_area_msr);
777 if (ret < 0)
778 goto out;
779
780 size >>= PAGE_SHIFT;
781 old_size >>= PAGE_SHIFT;
782
783 current->mm->total_vm -= old_size;
784 current->mm->locked_vm -= old_size;
785
786 if (size == 0)
787 goto out;
788
789 rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
790 vm = current->mm->total_vm + size;
791 if (rlim < vm) {
792 ret = -ENOMEM;
793
794 if (!reduce_size)
795 goto out;
796
797 size = rlim - current->mm->total_vm;
798 if (size <= 0)
799 goto out;
800 }
801
802 rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
803 vm = current->mm->locked_vm + size;
804 if (rlim < vm) {
805 ret = -ENOMEM;
806
807 if (!reduce_size)
808 goto out;
809
810 size = rlim - current->mm->locked_vm;
811 if (size <= 0)
812 goto out;
813 } 885 }
814 886
815 ret = ds_allocate((void **)&child->thread.ds_area_msr, 887 /* The writing task will be the switched-to task on a context
816 size << PAGE_SHIFT); 888 * switch. It needs to write into the switched-from task's BTS
817 if (ret < 0) 889 * buffer. */
818 goto out; 890 return ds_unchecked_write_bts(child, bts_record, bts_cfg.sizeof_bts);
819
820 current->mm->total_vm += size;
821 current->mm->locked_vm += size;
822
823out:
824 if (child->thread.ds_area_msr)
825 set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
826 else
827 clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
828
829 return ret;
830} 891}
831 892
832void ptrace_bts_take_timestamp(struct task_struct *tsk, 893void ptrace_bts_take_timestamp(struct task_struct *tsk,
@@ -839,7 +900,66 @@ void ptrace_bts_take_timestamp(struct task_struct *tsk,
839 900
840 ptrace_bts_write_record(tsk, &rec); 901 ptrace_bts_write_record(tsk, &rec);
841} 902}
842#endif /* X86_BTS */ 903
904static const struct bts_configuration bts_cfg_netburst = {
905 .sizeof_bts = sizeof(long) * 3,
906 .sizeof_field = sizeof(long),
907 .debugctl_mask = (1<<2)|(1<<3)|(1<<5)
908};
909
910static const struct bts_configuration bts_cfg_pentium_m = {
911 .sizeof_bts = sizeof(long) * 3,
912 .sizeof_field = sizeof(long),
913 .debugctl_mask = (1<<6)|(1<<7)
914};
915
916static const struct bts_configuration bts_cfg_core2 = {
917 .sizeof_bts = 8 * 3,
918 .sizeof_field = 8,
919 .debugctl_mask = (1<<6)|(1<<7)|(1<<9)
920};
921
922static inline void bts_configure(const struct bts_configuration *cfg)
923{
924 bts_cfg = *cfg;
925}
926
927void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c)
928{
929 switch (c->x86) {
930 case 0x6:
931 switch (c->x86_model) {
932 case 0xD:
933 case 0xE: /* Pentium M */
934 bts_configure(&bts_cfg_pentium_m);
935 break;
936 case 0xF: /* Core2 */
937 case 0x1C: /* Atom */
938 bts_configure(&bts_cfg_core2);
939 break;
940 default:
941 /* sorry, don't know about them */
942 break;
943 }
944 break;
945 case 0xF:
946 switch (c->x86_model) {
947 case 0x0:
948 case 0x1:
949 case 0x2: /* Netburst */
950 bts_configure(&bts_cfg_netburst);
951 break;
952 default:
953 /* sorry, don't know about them */
954 break;
955 }
956 break;
957 default:
958 /* sorry, don't know about them */
959 break;
960 }
961}
962#endif /* CONFIG_X86_PTRACE_BTS */
843 963
844/* 964/*
845 * Called by kernel/ptrace.c when detaching.. 965 * Called by kernel/ptrace.c when detaching..
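The init function above is table-driven dispatch: CPUID family and model select a bts_configuration carrying the BTS record geometry and the DEBUGCTL bits that enable branch tracing on that core. A user-space sketch of the same selection, assuming GCC's <cpuid.h> and the usual extended-model encoding for families 6 and 15 (extended family is ignored for brevity; the model numbers and the core2 config values mirror the hunk):

#include <cpuid.h>
#include <stdio.h>

struct bts_configuration {
	unsigned char sizeof_bts;	/* size of one BTS record */
	unsigned char sizeof_field;	/* size of one field in a record */
	unsigned long debugctl_mask;	/* DEBUGCTL bits that enable BTS */
};

static const struct bts_configuration bts_cfg_core2 = {
	.sizeof_bts = 8 * 3, .sizeof_field = 8,
	.debugctl_mask = (1 << 6) | (1 << 7) | (1 << 9)
};

int main(void)
{
	unsigned int eax, ebx, ecx, edx, family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	family = (eax >> 8) & 0xf;
	model = (eax >> 4) & 0xf;
	if (family == 0x6 || family == 0xf)
		model |= ((eax >> 16) & 0xf) << 4;	/* extended model */

	printf("family 0x%x model 0x%x\n", family, model);
	if (family == 0x6 && (model == 0xf || model == 0x1c))
		printf("would use bts_cfg_core2 (record size %u)\n",
		       bts_cfg_core2.sizeof_bts);
	else
		printf("no known BTS configuration\n");
	return 0;
}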
@@ -852,15 +972,15 @@ void ptrace_disable(struct task_struct *child)
 #ifdef TIF_SYSCALL_EMU
 	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 #endif
-	if (child->thread.ds_area_msr) {
-#ifdef X86_BTS
-		ptrace_bts_realloc(child, 0, 0);
-#endif
-		child->thread.debugctlmsr &= ~ds_debugctl_mask();
-		if (!child->thread.debugctlmsr)
-			clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
-		clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
-	}
+#ifdef CONFIG_X86_PTRACE_BTS
+	(void)ds_release_bts(child);
+
+	child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
+	if (!child->thread.debugctlmsr)
+		clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+
+	clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
+#endif /* CONFIG_X86_PTRACE_BTS */
 }
865 985
866#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION 986#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
@@ -980,7 +1100,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
980 /* 1100 /*
981 * These bits need more cooking - not enabled yet: 1101 * These bits need more cooking - not enabled yet:
982 */ 1102 */
983#ifdef X86_BTS 1103#ifdef CONFIG_X86_PTRACE_BTS
984 case PTRACE_BTS_CONFIG: 1104 case PTRACE_BTS_CONFIG:
985 ret = ptrace_bts_config 1105 ret = ptrace_bts_config
986 (child, data, (struct ptrace_bts_config __user *)addr); 1106 (child, data, (struct ptrace_bts_config __user *)addr);
@@ -992,7 +1112,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
992 break; 1112 break;
993 1113
994 case PTRACE_BTS_SIZE: 1114 case PTRACE_BTS_SIZE:
995 ret = ptrace_bts_get_size(child); 1115 ret = ds_get_bts_index(child, /* pos = */ NULL);
996 break; 1116 break;
997 1117
998 case PTRACE_BTS_GET: 1118 case PTRACE_BTS_GET:
@@ -1001,14 +1121,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
1001 break; 1121 break;
1002 1122
1003 case PTRACE_BTS_CLEAR: 1123 case PTRACE_BTS_CLEAR:
1004 ret = ptrace_bts_clear(child); 1124 ret = ds_clear_bts(child);
1005 break; 1125 break;
1006 1126
1007 case PTRACE_BTS_DRAIN: 1127 case PTRACE_BTS_DRAIN:
1008 ret = ptrace_bts_drain 1128 ret = ptrace_bts_drain
1009 (child, data, (struct bts_struct __user *) addr); 1129 (child, data, (struct bts_struct __user *) addr);
1010 break; 1130 break;
1011#endif 1131#endif /* CONFIG_X86_PTRACE_BTS */
1012 1132
1013 default: 1133 default:
1014 ret = ptrace_request(child, request, addr, data); 1134 ret = ptrace_request(child, request, addr, data);
@@ -1290,6 +1410,12 @@ static const struct user_regset x86_64_regsets[] = {
1290 .size = sizeof(long), .align = sizeof(long), 1410 .size = sizeof(long), .align = sizeof(long),
1291 .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set 1411 .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
1292 }, 1412 },
1413 [REGSET_IOPERM64] = {
1414 .core_note_type = NT_386_IOPERM,
1415 .n = IO_BITMAP_LONGS,
1416 .size = sizeof(long), .align = sizeof(long),
1417 .active = ioperm_active, .get = ioperm_get
1418 },
1293}; 1419};
1294 1420
1295static const struct user_regset_view user_x86_64_view = { 1421static const struct user_regset_view user_x86_64_view = {
@@ -1336,6 +1462,12 @@ static const struct user_regset x86_32_regsets[] = {
1336 .active = regset_tls_active, 1462 .active = regset_tls_active,
1337 .get = regset_tls_get, .set = regset_tls_set 1463 .get = regset_tls_get, .set = regset_tls_set
1338 }, 1464 },
1465 [REGSET_IOPERM32] = {
1466 .core_note_type = NT_386_IOPERM,
1467 .n = IO_BITMAP_BYTES / sizeof(u32),
1468 .size = sizeof(u32), .align = sizeof(u32),
1469 .active = ioperm_active, .get = ioperm_get
1470 },
1339}; 1471};
1340 1472
1341static const struct user_regset_view user_x86_32_view = { 1473static const struct user_regset_view user_x86_32_view = {
@@ -1357,7 +1489,8 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1357#endif 1489#endif
1358} 1490}
1359 1491
1360void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) 1492void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1493 int error_code, int si_code)
1361{ 1494{
1362 struct siginfo info; 1495 struct siginfo info;
1363 1496
@@ -1366,7 +1499,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
1366 1499
1367 memset(&info, 0, sizeof(info)); 1500 memset(&info, 0, sizeof(info));
1368 info.si_signo = SIGTRAP; 1501 info.si_signo = SIGTRAP;
1369 info.si_code = TRAP_BRKPT; 1502 info.si_code = si_code;
1370 1503
1371 /* User-mode ip? */ 1504 /* User-mode ip? */
1372 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL; 1505 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
@@ -1375,30 +1508,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
1375 force_sig_info(SIGTRAP, &info, tsk); 1508 force_sig_info(SIGTRAP, &info, tsk);
1376} 1509}
1377 1510
1378static void syscall_trace(struct pt_regs *regs)
1379{
1380 if (!(current->ptrace & PT_PTRACED))
1381 return;
1382
1383#if 0
1384 printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
1385 current->comm,
1386 regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
1387 current_thread_info()->flags, current->ptrace);
1388#endif
1389
1390 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
1391 ? 0x80 : 0));
1392 /*
1393 * this isn't the same as continuing with a signal, but it will do
1394 * for normal use. strace only continues with a signal if the
1395 * stopping signal is not SIGTRAP. -brl
1396 */
1397 if (current->exit_code) {
1398 send_sig(current->exit_code, current, 1);
1399 current->exit_code = 0;
1400 }
1401}
1402 1511
1403#ifdef CONFIG_X86_32 1512#ifdef CONFIG_X86_32
1404# define IS_IA32 1 1513# define IS_IA32 1
@@ -1432,8 +1541,9 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
1432 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU))) 1541 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
1433 ret = -1L; 1542 ret = -1L;
1434 1543
1435 if (ret || test_thread_flag(TIF_SYSCALL_TRACE)) 1544 if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
1436 syscall_trace(regs); 1545 tracehook_report_syscall_entry(regs))
1546 ret = -1L;
1437 1547
1438 if (unlikely(current->audit_context)) { 1548 if (unlikely(current->audit_context)) {
1439 if (IS_IA32) 1549 if (IS_IA32)
@@ -1459,7 +1569,7 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
1459 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); 1569 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1460 1570
1461 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1571 if (test_thread_flag(TIF_SYSCALL_TRACE))
1462 syscall_trace(regs); 1572 tracehook_report_syscall_exit(regs, 0);
1463 1573
1464 /* 1574 /*
1465 * If TIF_SYSCALL_EMU is set, we only get here because of 1575 * If TIF_SYSCALL_EMU is set, we only get here because of
@@ -1475,6 +1585,6 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
1475 * system call instruction. 1585 * system call instruction.
1476 */ 1586 */
1477 if (test_thread_flag(TIF_SINGLESTEP) && 1587 if (test_thread_flag(TIF_SINGLESTEP) &&
1478 (current->ptrace & PT_PTRACED)) 1588 tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL))
1479 send_sigtrap(current, regs, 0); 1589 send_sigtrap(current, regs, 0, TRAP_BRKPT);
1480} 1590}
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 05fbe9a0325a..4f9c55f3a7c0 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -97,6 +97,18 @@ static unsigned pvclock_get_time_values(struct pvclock_shadow_time *dst,
97 return dst->version; 97 return dst->version;
98} 98}
99 99
100unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
101{
102 u64 pv_tsc_khz = 1000000ULL << 32;
103
104 do_div(pv_tsc_khz, src->tsc_to_system_mul);
105 if (src->tsc_shift < 0)
106 pv_tsc_khz <<= -src->tsc_shift;
107 else
108 pv_tsc_khz >>= src->tsc_shift;
109 return pv_tsc_khz;
110}
111
100cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) 112cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
101{ 113{
102 struct pvclock_shadow_time shadow; 114 struct pvclock_shadow_time shadow;
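The new pvclock_tsc_khz() inverts the guest clock scale: system-time nanoseconds are computed as (tsc * tsc_to_system_mul) >> 32, applied after tsc_shift, so the TSC rate in kHz is (10^6 << 32) / mul with the shift undone. A standalone sketch of the same arithmetic (the kernel uses do_div() because this must also run on 32-bit; the sample mul value below is made up for a hypothetical 2 GHz guest):

#include <stdint.h>
#include <stdio.h>

/* ns = ((tsc << shift) * mul) >> 32, hence khz = (1e6 << 32) / mul,
 * shift-adjusted. Field names follow the pvclock ABI. */
static uint64_t tsc_khz_from_pvclock(uint32_t tsc_to_system_mul,
				     int8_t tsc_shift)
{
	uint64_t khz = 1000000ULL << 32;

	khz /= tsc_to_system_mul;
	if (tsc_shift < 0)
		khz <<= -tsc_shift;
	else
		khz >>= tsc_shift;
	return khz;
}

int main(void)
{
	/* 2 GHz guest: 0.5 ns per tick -> mul = 0.5 * 2^32 */
	printf("%llu kHz\n",
	       (unsigned long long)tsc_khz_from_pvclock(2147483648u, 0));
	return 0;
}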
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index d13858818100..67465ed89310 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -35,9 +35,6 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
35 if (!(word & (1 << 13))) { 35 if (!(word & (1 << 13))) {
36 dev_info(&dev->dev, "Intel E7520/7320/7525 detected; " 36 dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
37 "disabling irq balancing and affinity\n"); 37 "disabling irq balancing and affinity\n");
38#ifdef CONFIG_IRQBALANCE
39 irqbalance_disable("");
40#endif
41 noirqdebug_setup(""); 38 noirqdebug_setup("");
42#ifdef CONFIG_PROC_FS 39#ifdef CONFIG_PROC_FS
43 no_irq_affinity = 1; 40 no_irq_affinity = 1;
@@ -354,9 +351,27 @@ static void ati_force_hpet_resume(void)
354 printk(KERN_DEBUG "Force enabled HPET at resume\n"); 351 printk(KERN_DEBUG "Force enabled HPET at resume\n");
355} 352}
356 353
354static u32 ati_ixp4x0_rev(struct pci_dev *dev)
355{
356 u32 d;
357 u8 b;
358
359 pci_read_config_byte(dev, 0xac, &b);
360 b &= ~(1<<5);
361 pci_write_config_byte(dev, 0xac, b);
362 pci_read_config_dword(dev, 0x70, &d);
363 d |= 1<<8;
364 pci_write_config_dword(dev, 0x70, d);
365 pci_read_config_dword(dev, 0x8, &d);
366 d &= 0xff;
367 dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
368 return d;
369}
370
357static void ati_force_enable_hpet(struct pci_dev *dev) 371static void ati_force_enable_hpet(struct pci_dev *dev)
358{ 372{
359 u32 uninitialized_var(val); 373 u32 d, val;
374 u8 b;
360 375
361 if (hpet_address || force_hpet_address) 376 if (hpet_address || force_hpet_address)
362 return; 377 return;
@@ -366,14 +381,33 @@ static void ati_force_enable_hpet(struct pci_dev *dev)
366 return; 381 return;
367 } 382 }
368 383
384 d = ati_ixp4x0_rev(dev);
385 if (d < 0x82)
386 return;
387
388 /* base address */
369 pci_write_config_dword(dev, 0x14, 0xfed00000); 389 pci_write_config_dword(dev, 0x14, 0xfed00000);
370 pci_read_config_dword(dev, 0x14, &val); 390 pci_read_config_dword(dev, 0x14, &val);
391
392 /* enable interrupt */
393 outb(0x72, 0xcd6); b = inb(0xcd7);
394 b |= 0x1;
395 outb(0x72, 0xcd6); outb(b, 0xcd7);
396 outb(0x72, 0xcd6); b = inb(0xcd7);
397 if (!(b & 0x1))
398 return;
399 pci_read_config_dword(dev, 0x64, &d);
400 d |= (1<<10);
401 pci_write_config_dword(dev, 0x64, d);
402 pci_read_config_dword(dev, 0x64, &d);
403 if (!(d & (1<<10)))
404 return;
405
371 force_hpet_address = val; 406 force_hpet_address = val;
372 force_hpet_resume_type = ATI_FORCE_HPET_RESUME; 407 force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
373 dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n", 408 dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
374 force_hpet_address); 409 force_hpet_address);
375 cached_dev = dev; 410 cached_dev = dev;
376 return;
377} 411}
378DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS, 412DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
379 ati_force_enable_hpet); 413 ati_force_enable_hpet);
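The interrupt-enable half of the quirk drives an indexed register pair: the register number is written to I/O port 0xcd6 and the data byte is read or written at 0xcd7, with a read-back after each write to verify the hardware latched the bit before committing to the forced HPET address. A user-space sketch of that access pattern, assuming Linux's <sys/io.h> (ioperm() needs root; register 0x72 and bit 0 are taken from the hunk — do not poke these ports on real hardware casually):

#include <stdio.h>
#include <sys/io.h>	/* inb/outb/ioperm, x86 Linux */

static unsigned char pm_read(unsigned char idx)
{
	outb(idx, 0xcd6);		/* select the register */
	return inb(0xcd7);		/* read its data */
}

static void pm_write(unsigned char idx, unsigned char val)
{
	outb(idx, 0xcd6);
	outb(val, 0xcd7);
}

int main(void)
{
	unsigned char b;

	if (ioperm(0xcd6, 2, 1) < 0) {
		perror("ioperm");
		return 1;
	}
	b = pm_read(0x72);
	pm_write(0x72, b | 0x1);	/* set the enable bit */
	if (!(pm_read(0x72) & 0x1))	/* verify it stuck */
		fprintf(stderr, "bit did not latch\n");
	return 0;
}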
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 724adfc63cb9..f4c93f1cfc19 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -29,7 +29,11 @@ EXPORT_SYMBOL(pm_power_off);
29 29
30static const struct desc_ptr no_idt = {}; 30static const struct desc_ptr no_idt = {};
31static int reboot_mode; 31static int reboot_mode;
32enum reboot_type reboot_type = BOOT_KBD; 32/*
33 * Keyboard reset and triple fault may result in INIT, not RESET, which
34 * doesn't work when we're in vmx root mode. Try ACPI first.
35 */
36enum reboot_type reboot_type = BOOT_ACPI;
33int reboot_force; 37int reboot_force;
34 38
35#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) 39#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 05191bbc68b8..dd6f2b71561b 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -52,7 +52,7 @@ int mach_set_rtc_mmss(unsigned long nowtime)
52 52
53 cmos_minutes = CMOS_READ(RTC_MINUTES); 53 cmos_minutes = CMOS_READ(RTC_MINUTES);
54 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 54 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
55 BCD_TO_BIN(cmos_minutes); 55 cmos_minutes = bcd2bin(cmos_minutes);
56 56
57 /* 57 /*
58 * since we're only adjusting minutes and seconds, 58 * since we're only adjusting minutes and seconds,
@@ -69,8 +69,8 @@ int mach_set_rtc_mmss(unsigned long nowtime)
69 69
70 if (abs(real_minutes - cmos_minutes) < 30) { 70 if (abs(real_minutes - cmos_minutes) < 30) {
71 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { 71 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
72 BIN_TO_BCD(real_seconds); 72 real_seconds = bin2bcd(real_seconds);
73 BIN_TO_BCD(real_minutes); 73 real_minutes = bin2bcd(real_minutes);
74 } 74 }
75 CMOS_WRITE(real_seconds,RTC_SECONDS); 75 CMOS_WRITE(real_seconds,RTC_SECONDS);
76 CMOS_WRITE(real_minutes,RTC_MINUTES); 76 CMOS_WRITE(real_minutes,RTC_MINUTES);
@@ -124,16 +124,16 @@ unsigned long mach_get_cmos_time(void)
124 WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); 124 WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY));
125 125
126 if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { 126 if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) {
127 BCD_TO_BIN(sec); 127 sec = bcd2bin(sec);
128 BCD_TO_BIN(min); 128 min = bcd2bin(min);
129 BCD_TO_BIN(hour); 129 hour = bcd2bin(hour);
130 BCD_TO_BIN(day); 130 day = bcd2bin(day);
131 BCD_TO_BIN(mon); 131 mon = bcd2bin(mon);
132 BCD_TO_BIN(year); 132 year = bcd2bin(year);
133 } 133 }
134 134
135 if (century) { 135 if (century) {
136 BCD_TO_BIN(century); 136 century = bcd2bin(century);
137 year += century * 100; 137 year += century * 100;
138 printk(KERN_INFO "Extended CMOS year: %d\n", century * 100); 138 printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
139 } else 139 } else
@@ -223,11 +223,25 @@ static struct platform_device rtc_device = {
223static __init int add_rtc_cmos(void) 223static __init int add_rtc_cmos(void)
224{ 224{
225#ifdef CONFIG_PNP 225#ifdef CONFIG_PNP
226 if (!pnp_platform_devices) 226 static const char *ids[] __initconst =
227 platform_device_register(&rtc_device); 227 { "PNP0b00", "PNP0b01", "PNP0b02", };
228#else 228 struct pnp_dev *dev;
229 struct pnp_id *id;
230 int i;
231
232 pnp_for_each_dev(dev) {
233 for (id = dev->id; id; id = id->next) {
234 for (i = 0; i < ARRAY_SIZE(ids); i++) {
235 if (compare_pnp_id(id, ids[i]) != 0)
236 return 0;
237 }
238 }
239 }
240#endif
241
229 platform_device_register(&rtc_device); 242 platform_device_register(&rtc_device);
230#endif /* CONFIG_PNP */ 243 dev_info(&rtc_device.dev,
244 "registered platform RTC device (no PNP device found)\n");
231 return 0; 245 return 0;
232} 246}
233device_initcall(add_rtc_cmos); 247device_initcall(add_rtc_cmos);
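The RTC stores each time field as binary-coded decimal unless RTC_DM_BINARY is set; the patch simply switches from the value-modifying BCD_TO_BIN/BIN_TO_BCD macros to the function-style bcd2bin()/bin2bcd(). A self-contained sketch of the two helpers (same formulas as the kernel's <linux/bcd.h>):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint8_t bcd2bin(uint8_t val)
{
	return (val & 0x0f) + (val >> 4) * 10;	/* low nibble + tens */
}

static uint8_t bin2bcd(uint8_t val)
{
	return ((val / 10) << 4) + val % 10;
}

int main(void)
{
	/* CMOS in BCD mode stores 59 minutes as 0x59 */
	assert(bcd2bin(0x59) == 59);
	assert(bin2bcd(59) == 0x59);
	printf("0x59 -> %u, 59 -> 0x%02x\n", bcd2bin(0x59), bin2bcd(59));
	return 0;
}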
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9838f2539dfc..0fa6790c1dd3 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -223,6 +223,9 @@ unsigned long saved_video_mode;
223#define RAMDISK_LOAD_FLAG 0x4000 223#define RAMDISK_LOAD_FLAG 0x4000
224 224
225static char __initdata command_line[COMMAND_LINE_SIZE]; 225static char __initdata command_line[COMMAND_LINE_SIZE];
226#ifdef CONFIG_CMDLINE_BOOL
227static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
228#endif
226 229
227#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) 230#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
228struct edd edd; 231struct edd edd;
@@ -299,7 +302,7 @@ static void __init relocate_initrd(void)
299 if (clen > MAX_MAP_CHUNK-slop) 302 if (clen > MAX_MAP_CHUNK-slop)
300 clen = MAX_MAP_CHUNK-slop; 303 clen = MAX_MAP_CHUNK-slop;
301 mapaddr = ramdisk_image & PAGE_MASK; 304 mapaddr = ramdisk_image & PAGE_MASK;
302 p = early_ioremap(mapaddr, clen+slop); 305 p = early_memremap(mapaddr, clen+slop);
303 memcpy(q, p+slop, clen); 306 memcpy(q, p+slop, clen);
304 early_iounmap(p, clen+slop); 307 early_iounmap(p, clen+slop);
305 q += clen; 308 q += clen;
@@ -376,7 +379,7 @@ static void __init parse_setup_data(void)
376 return; 379 return;
377 pa_data = boot_params.hdr.setup_data; 380 pa_data = boot_params.hdr.setup_data;
378 while (pa_data) { 381 while (pa_data) {
379 data = early_ioremap(pa_data, PAGE_SIZE); 382 data = early_memremap(pa_data, PAGE_SIZE);
380 switch (data->type) { 383 switch (data->type) {
381 case SETUP_E820_EXT: 384 case SETUP_E820_EXT:
382 parse_e820_ext(data, pa_data); 385 parse_e820_ext(data, pa_data);
@@ -399,7 +402,7 @@ static void __init e820_reserve_setup_data(void)
399 return; 402 return;
400 pa_data = boot_params.hdr.setup_data; 403 pa_data = boot_params.hdr.setup_data;
401 while (pa_data) { 404 while (pa_data) {
402 data = early_ioremap(pa_data, sizeof(*data)); 405 data = early_memremap(pa_data, sizeof(*data));
403 e820_update_range(pa_data, sizeof(*data)+data->len, 406 e820_update_range(pa_data, sizeof(*data)+data->len,
404 E820_RAM, E820_RESERVED_KERN); 407 E820_RAM, E820_RESERVED_KERN);
405 found = 1; 408 found = 1;
@@ -425,7 +428,7 @@ static void __init reserve_early_setup_data(void)
425 return; 428 return;
426 pa_data = boot_params.hdr.setup_data; 429 pa_data = boot_params.hdr.setup_data;
427 while (pa_data) { 430 while (pa_data) {
428 data = early_ioremap(pa_data, sizeof(*data)); 431 data = early_memremap(pa_data, sizeof(*data));
429 sprintf(buf, "setup data %x", data->type); 432 sprintf(buf, "setup data %x", data->type);
430 reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf); 433 reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
431 pa_data = data->next; 434 pa_data = data->next;
@@ -558,7 +561,13 @@ static void __init reserve_standard_io_resources(void)
558 561
559} 562}
560 563
561#ifdef CONFIG_PROC_VMCORE 564/*
565 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
566 * is_kdump_kernel() to determine if we are booting after a panic. Hence
567 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
568 */
569
570#ifdef CONFIG_CRASH_DUMP
562/* elfcorehdr= specifies the location of elf core header 571/* elfcorehdr= specifies the location of elf core header
563 * stored by the crashed kernel. This option will be passed 572 * stored by the crashed kernel. This option will be passed
564 * by kexec loader to the capture kernel. 573 * by kexec loader to the capture kernel.
@@ -579,6 +588,190 @@ static struct x86_quirks default_x86_quirks __initdata;
579struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; 588struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
580 589
581/* 590/*
591 * Some BIOSes seem to corrupt the low 64k of memory during events
592 * like suspend/resume and unplugging an HDMI cable. Reserve all
593 * remaining free memory in that area and fill it with a distinct
594 * pattern.
595 */
596#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
597#define MAX_SCAN_AREAS 8
598
599static int __read_mostly memory_corruption_check = -1;
600
601static unsigned __read_mostly corruption_check_size = 64*1024;
602static unsigned __read_mostly corruption_check_period = 60; /* seconds */
603
604static struct e820entry scan_areas[MAX_SCAN_AREAS];
605static int num_scan_areas;
606
607
608static int set_corruption_check(char *arg)
609{
610 char *end;
611
612 memory_corruption_check = simple_strtol(arg, &end, 10);
613
614 return (*end == 0) ? 0 : -EINVAL;
615}
616early_param("memory_corruption_check", set_corruption_check);
617
618static int set_corruption_check_period(char *arg)
619{
620 char *end;
621
622 corruption_check_period = simple_strtoul(arg, &end, 10);
623
624 return (*end == 0) ? 0 : -EINVAL;
625}
626early_param("memory_corruption_check_period", set_corruption_check_period);
627
628static int set_corruption_check_size(char *arg)
629{
630 char *end;
631 unsigned size;
632
633 size = memparse(arg, &end);
634
635 if (*end == '\0')
636 corruption_check_size = size;
637
638 return (size == corruption_check_size) ? 0 : -EINVAL;
639}
640early_param("memory_corruption_check_size", set_corruption_check_size);
641
642
643static void __init setup_bios_corruption_check(void)
644{
645 u64 addr = PAGE_SIZE; /* assume first page is reserved anyway */
646
647 if (memory_corruption_check == -1) {
648 memory_corruption_check =
649#ifdef CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
650 1
651#else
652 0
653#endif
654 ;
655 }
656
657 if (corruption_check_size == 0)
658 memory_corruption_check = 0;
659
660 if (!memory_corruption_check)
661 return;
662
663 corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
664
665 while(addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) {
666 u64 size;
667 addr = find_e820_area_size(addr, &size, PAGE_SIZE);
668
669 if (addr == 0)
670 break;
671
672 if ((addr + size) > corruption_check_size)
673 size = corruption_check_size - addr;
674
675 if (size == 0)
676 break;
677
678 e820_update_range(addr, size, E820_RAM, E820_RESERVED);
679 scan_areas[num_scan_areas].addr = addr;
680 scan_areas[num_scan_areas].size = size;
681 num_scan_areas++;
682
683 /* Assume we've already mapped this early memory */
684 memset(__va(addr), 0, size);
685
686 addr += size;
687 }
688
689 printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
690 num_scan_areas);
691 update_e820();
692}
693
694static struct timer_list periodic_check_timer;
695
696void check_for_bios_corruption(void)
697{
698 int i;
699 int corruption = 0;
700
701 if (!memory_corruption_check)
702 return;
703
704 for(i = 0; i < num_scan_areas; i++) {
705 unsigned long *addr = __va(scan_areas[i].addr);
706 unsigned long size = scan_areas[i].size;
707
708 for(; size; addr++, size -= sizeof(unsigned long)) {
709 if (!*addr)
710 continue;
711 printk(KERN_ERR "Corrupted low memory at %p (%lx phys) = %08lx\n",
712 addr, __pa(addr), *addr);
713 corruption = 1;
714 *addr = 0;
715 }
716 }
717
718 WARN(corruption, KERN_ERR "Memory corruption detected in low memory\n");
719}
720
721static void periodic_check_for_corruption(unsigned long data)
722{
723 check_for_bios_corruption();
724 mod_timer(&periodic_check_timer, round_jiffies(jiffies + corruption_check_period*HZ));
725}
726
727void start_periodic_check_for_corruption(void)
728{
729 if (!memory_corruption_check || corruption_check_period == 0)
730 return;
731
732 printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
733 corruption_check_period);
734
735 init_timer(&periodic_check_timer);
736 periodic_check_timer.function = &periodic_check_for_corruption;
737 periodic_check_for_corruption(0);
738}
739#endif
740
741static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
742{
743 printk(KERN_NOTICE
744 "%s detected: BIOS may corrupt low RAM, working it around.\n",
745 d->ident);
746
747 e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
748 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
749
750 return 0;
751}
752
753/* List of systems that have known low memory corruption BIOS problems */
754static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
755#ifdef CONFIG_X86_RESERVE_LOW_64K
756 {
757 .callback = dmi_low_memory_corruption,
758 .ident = "AMI BIOS",
759 .matches = {
760 DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
761 },
762 },
763 {
764 .callback = dmi_low_memory_corruption,
765 .ident = "Phoenix BIOS",
766 .matches = {
767 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
768 },
769 },
770#endif
771 {}
772};
773
774/*
582 * Determine if we were loaded by an EFI loader. If so, then we have also been 775 * Determine if we were loaded by an EFI loader. If so, then we have also been
583 * passed the efi memmap, systab, etc., so we should use these data structures 776 * passed the efi memmap, systab, etc., so we should use these data structures
584 * for initialization. Note, the efi init code path is determined by the 777 * for initialization. Note, the efi init code path is determined by the
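check_for_bios_corruption() relies on an invariant established in setup_bios_corruption_check(): every word in the reserved scan areas was zeroed up front, so any word that later reads back non-zero must have been written behind the kernel's back, and re-zeroing it re-arms the trap. A user-space sketch of that scan (the buffer and the "corruption" are simulated here):

#include <stdio.h>
#include <string.h>

static int scan_region(unsigned long *addr, unsigned long size)
{
	int corruption = 0;

	for (; size; addr++, size -= sizeof(unsigned long)) {
		if (!*addr)
			continue;
		printf("corrupted low memory word at %p = %08lx\n",
		       (void *)addr, *addr);
		*addr = 0;	/* re-arm for the next pass */
		corruption = 1;
	}
	return corruption;
}

int main(void)
{
	unsigned long area[16];

	memset(area, 0, sizeof(area));	/* boot-time zero fill */
	area[3] = 0xdeadbeefUL;		/* simulate a BIOS scribble */
	return scan_region(area, sizeof(area)) ? 1 : 0;
}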
@@ -665,6 +858,19 @@ void __init setup_arch(char **cmdline_p)
665 bss_resource.start = virt_to_phys(&__bss_start); 858 bss_resource.start = virt_to_phys(&__bss_start);
666 bss_resource.end = virt_to_phys(&__bss_stop)-1; 859 bss_resource.end = virt_to_phys(&__bss_stop)-1;
667 860
861#ifdef CONFIG_CMDLINE_BOOL
862#ifdef CONFIG_CMDLINE_OVERRIDE
863 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
864#else
865 if (builtin_cmdline[0]) {
866 /* append boot loader cmdline to builtin */
867 strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
868 strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
869 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
870 }
871#endif
872#endif
873
668 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); 874 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
669 *cmdline_p = command_line; 875 *cmdline_p = command_line;
670 876
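The CONFIG_CMDLINE_BOOL block merges the compiled-in command line with the boot loader's: built-in string first, a space, then the boot loader string, everything truncated to COMMAND_LINE_SIZE by the strlcat()/strlcpy() pair. A sketch using plain snprintf() (strlcpy/strlcat are kernel/BSD helpers that older glibc lacks; the size constant is the x86 value, assumed here):

#include <stdio.h>

#define COMMAND_LINE_SIZE 2048	/* x86 value, assumed */

static void merge_cmdline(char *boot_command_line,
			  const char *builtin_cmdline)
{
	char merged[COMMAND_LINE_SIZE];

	/* builtin first, boot loader appended; snprintf truncates */
	snprintf(merged, sizeof(merged), "%s %s",
		 builtin_cmdline, boot_command_line);
	snprintf(boot_command_line, COMMAND_LINE_SIZE, "%s", merged);
}

int main(void)
{
	char bootloader[COMMAND_LINE_SIZE] = "root=/dev/sda1 quiet";

	merge_cmdline(bootloader, "console=ttyS0,115200");
	printf("%s\n", bootloader);
	return 0;
}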
@@ -699,6 +905,10 @@ void __init setup_arch(char **cmdline_p)
699 905
700 finish_e820_parsing(); 906 finish_e820_parsing();
701 907
908 dmi_scan_machine();
909
910 dmi_check_system(bad_bios_dmi_table);
911
702#ifdef CONFIG_X86_32 912#ifdef CONFIG_X86_32
703 probe_roms(); 913 probe_roms();
704#endif 914#endif
@@ -742,6 +952,8 @@ void __init setup_arch(char **cmdline_p)
742#else 952#else
743 num_physpages = max_pfn; 953 num_physpages = max_pfn;
744 954
955 if (cpu_has_x2apic)
956 check_x2apic();
745 957
746 /* How many end-of-memory variables you have, grandma! */ 958 /* How many end-of-memory variables you have, grandma! */
747 /* need this before calling reserve_initrd */ 959 /* need this before calling reserve_initrd */
@@ -753,6 +965,10 @@ void __init setup_arch(char **cmdline_p)
753 high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; 965 high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
754#endif 966#endif
755 967
968#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
969 setup_bios_corruption_check();
970#endif
971
756 /* max_pfn_mapped is updated here */ 972 /* max_pfn_mapped is updated here */
757 max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT); 973 max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
758 max_pfn_mapped = max_low_pfn_mapped; 974 max_pfn_mapped = max_low_pfn_mapped;
@@ -781,8 +997,6 @@ void __init setup_arch(char **cmdline_p)
781 vsmp_init(); 997 vsmp_init();
782#endif 998#endif
783 999
784 dmi_scan_machine();
785
786 io_delay_init(); 1000 io_delay_init();
787 1001
788 /* 1002 /*
@@ -790,6 +1004,8 @@ void __init setup_arch(char **cmdline_p)
790 */ 1004 */
791 acpi_boot_table_init(); 1005 acpi_boot_table_init();
792 1006
1007 early_acpi_boot_init();
1008
793#ifdef CONFIG_ACPI_NUMA 1009#ifdef CONFIG_ACPI_NUMA
794 /* 1010 /*
795 * Parse SRAT to discover nodes. 1011 * Parse SRAT to discover nodes.
@@ -857,6 +1073,7 @@ void __init setup_arch(char **cmdline_p)
857#endif 1073#endif
858 1074
859 prefill_possible_map(); 1075 prefill_possible_map();
1076
860#ifdef CONFIG_X86_64 1077#ifdef CONFIG_X86_64
861 init_cpu_to_node(); 1078 init_cpu_to_node();
862#endif 1079#endif
@@ -864,6 +1081,9 @@ void __init setup_arch(char **cmdline_p)
864 init_apic_mappings(); 1081 init_apic_mappings();
865 ioapic_init_mappings(); 1082 ioapic_init_mappings();
866 1083
1084	/* need to wait until the io_apic is mapped */
1085 nr_irqs = probe_nr_irqs();
1086
867 kvm_guest_init(); 1087 kvm_guest_init();
868 1088
869 e820_reserve_resources(); 1089 e820_reserve_resources();
@@ -885,3 +1105,5 @@ void __init setup_arch(char **cmdline_p)
885#endif 1105#endif
886#endif 1106#endif
887} 1107}
1108
1109
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 76e305e064f9..410c88f0bfeb 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -140,35 +140,47 @@ static void __init setup_cpu_pda_map(void)
140 */ 140 */
141void __init setup_per_cpu_areas(void) 141void __init setup_per_cpu_areas(void)
142{ 142{
143 ssize_t size = PERCPU_ENOUGH_ROOM; 143 ssize_t size, old_size;
144 char *ptr; 144 char *ptr;
145 int cpu; 145 int cpu;
146 unsigned long align = 1;
146 147
147 /* Setup cpu_pda map */ 148 /* Setup cpu_pda map */
148 setup_cpu_pda_map(); 149 setup_cpu_pda_map();
149 150
150 /* Copy section for each CPU (we discard the original) */ 151 /* Copy section for each CPU (we discard the original) */
151 size = PERCPU_ENOUGH_ROOM; 152 old_size = PERCPU_ENOUGH_ROOM;
153 align = max_t(unsigned long, PAGE_SIZE, align);
154 size = roundup(old_size, align);
152 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", 155 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
153 size); 156 size);
154 157
155 for_each_possible_cpu(cpu) { 158 for_each_possible_cpu(cpu) {
156#ifndef CONFIG_NEED_MULTIPLE_NODES 159#ifndef CONFIG_NEED_MULTIPLE_NODES
157 ptr = alloc_bootmem_pages(size); 160 ptr = __alloc_bootmem(size, align,
161 __pa(MAX_DMA_ADDRESS));
158#else 162#else
159 int node = early_cpu_to_node(cpu); 163 int node = early_cpu_to_node(cpu);
160 if (!node_online(node) || !NODE_DATA(node)) { 164 if (!node_online(node) || !NODE_DATA(node)) {
161 ptr = alloc_bootmem_pages(size); 165 ptr = __alloc_bootmem(size, align,
166 __pa(MAX_DMA_ADDRESS));
162 printk(KERN_INFO 167 printk(KERN_INFO
163 "cpu %d has no node %d or node-local memory\n", 168 "cpu %d has no node %d or node-local memory\n",
164 cpu, node); 169 cpu, node);
170 if (ptr)
171 printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n",
172 cpu, __pa(ptr));
173 }
174 else {
175 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
176 __pa(MAX_DMA_ADDRESS));
177 if (ptr)
178 printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
179 cpu, node, __pa(ptr));
165 } 180 }
166 else
167 ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
168#endif 181#endif
169 per_cpu_offset(cpu) = ptr - __per_cpu_start; 182 per_cpu_offset(cpu) = ptr - __per_cpu_start;
170 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 183 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
171
172 } 184 }
173 185
174 printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n", 186 printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
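The per-cpu change sizes each copy as roundup(PERCPU_ENOUGH_ROOM, align) with align at least PAGE_SIZE, then passes that alignment to the boot allocator explicitly instead of relying on alloc_bootmem_pages(). The rounding itself is just this (a sketch; the kernel's roundup() is a macro with the same result):

#include <stdio.h>

static unsigned long roundup_ul(unsigned long x, unsigned long align)
{
	/* smallest multiple of 'align' that is >= x */
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	/* e.g. 45000 bytes of percpu data, 4096-byte pages */
	printf("%lu\n", roundup_ul(45000UL, 4096UL));	/* -> 45056 */
	return 0;
}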
diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h
index 72bbb519d2dc..cc673aa55ce4 100644
--- a/arch/x86/kernel/sigframe.h
+++ b/arch/x86/kernel/sigframe.h
@@ -3,9 +3,18 @@ struct sigframe {
3 char __user *pretcode; 3 char __user *pretcode;
4 int sig; 4 int sig;
5 struct sigcontext sc; 5 struct sigcontext sc;
6 struct _fpstate fpstate; 6 /*
7 * fpstate is unused. fpstate is moved/allocated after
8 * retcode[] below. This movement keeps the FP state and the
9 * future state extensions (xsave) together.
10 * At the same time, retaining the unused fpstate keeps the offset
11 * of extramask[] in the sigframe unchanged, so any legacy
12 * application that accesses or modifies it keeps working.
13 */
14 struct _fpstate fpstate_unused;
7 unsigned long extramask[_NSIG_WORDS-1]; 15 unsigned long extramask[_NSIG_WORDS-1];
8 char retcode[8]; 16 char retcode[8];
17 /* fp state follows here */
9}; 18};
10 19
11struct rt_sigframe { 20struct rt_sigframe {
@@ -15,13 +24,19 @@ struct rt_sigframe {
15 void __user *puc; 24 void __user *puc;
16 struct siginfo info; 25 struct siginfo info;
17 struct ucontext uc; 26 struct ucontext uc;
18 struct _fpstate fpstate;
19 char retcode[8]; 27 char retcode[8];
28 /* fp state follows here */
20}; 29};
21#else 30#else
22struct rt_sigframe { 31struct rt_sigframe {
23 char __user *pretcode; 32 char __user *pretcode;
24 struct ucontext uc; 33 struct ucontext uc;
25 struct siginfo info; 34 struct siginfo info;
35 /* fp state follows here */
26}; 36};
37
38int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
39 sigset_t *set, struct pt_regs *regs);
40int ia32_setup_frame(int sig, struct k_sigaction *ka,
41 sigset_t *set, struct pt_regs *regs);
27#endif 42#endif
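The unused fpstate member is kept precisely because struct layout is ABI here: dropping it would shift extramask[], which legacy userspace addresses by fixed offset within the frame. A toy illustration with offsetof (the member sizes are made-up stand-ins, not the real kernel types):

#include <stddef.h>
#include <stdio.h>

struct old_sigframe {
	char *pretcode;
	int sig;
	char sc[88];		/* stand-in for struct sigcontext */
	char fpstate[112];	/* stand-in for struct _fpstate */
	unsigned long extramask[1];
	char retcode[8];
};

int main(void)
{
	/* removing 'fpstate' would move this offset and break old apps */
	printf("extramask at offset %zu\n",
	       offsetof(struct old_sigframe, extramask));
	return 0;
}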
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 6fb5bcdd8933..d6dd057d0f22 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/wait.h> 19#include <linux/wait.h>
20#include <linux/tracehook.h>
20#include <linux/elf.h> 21#include <linux/elf.h>
21#include <linux/smp.h> 22#include <linux/smp.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
@@ -26,6 +27,8 @@
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
27#include <asm/i387.h> 28#include <asm/i387.h>
28#include <asm/vdso.h> 29#include <asm/vdso.h>
30#include <asm/syscall.h>
31#include <asm/syscalls.h>
29 32
30#include "sigframe.h" 33#include "sigframe.h"
31 34
@@ -110,6 +113,27 @@ asmlinkage int sys_sigaltstack(unsigned long bx)
110 return do_sigaltstack(uss, uoss, regs->sp); 113 return do_sigaltstack(uss, uoss, regs->sp);
111} 114}
112 115
116#define COPY(x) { \
117 err |= __get_user(regs->x, &sc->x); \
118}
119
120#define COPY_SEG(seg) { \
121 unsigned short tmp; \
122 err |= __get_user(tmp, &sc->seg); \
123 regs->seg = tmp; \
124}
125
126#define COPY_SEG_STRICT(seg) { \
127 unsigned short tmp; \
128 err |= __get_user(tmp, &sc->seg); \
129 regs->seg = tmp | 3; \
130}
131
132#define GET_SEG(seg) { \
133 unsigned short tmp; \
134 err |= __get_user(tmp, &sc->seg); \
135 loadsegment(seg, tmp); \
136}
113 137
114/* 138/*
115 * Do a signal return; undo the signal stack. 139 * Do a signal return; undo the signal stack.
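The hoisted COPY/COPY_SEG/GET_SEG macros all follow one pattern: every __get_user() ORs its failure into a shared local err, so a long run of field copies can be checked once at the end. A toy model of that accumulation style (get_field() is a made-up stand-in for __get_user()):

#include <stdio.h>

static int get_field(int which, long *out)
{
	*out = which * 10;	/* pretend the fetch always succeeds */
	return 0;		/* non-zero would mean a fault */
}

#define COPY(x) { err |= get_field(x, &vals[x]); }

int main(void)
{
	long vals[3];
	unsigned int err = 0;

	COPY(0); COPY(1); COPY(2);
	printf("err=%u vals=%ld,%ld,%ld\n", err, vals[0], vals[1], vals[2]);
	return err ? 1 : 0;	/* one check for the whole sequence */
}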
@@ -118,28 +142,13 @@ static int
 restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 		   unsigned long *pax)
 {
+	void __user *buf;
+	unsigned int tmpflags;
 	unsigned int err = 0;
 
 	/* Always make any pending restarted system calls return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-#define COPY(x)		err |= __get_user(regs->x, &sc->x)
-
-#define COPY_SEG(seg)					\
-	{ unsigned short tmp;				\
-	  err |= __get_user(tmp, &sc->seg);		\
-	  regs->seg = tmp; }
-
-#define COPY_SEG_STRICT(seg)				\
-	{ unsigned short tmp;				\
-	  err |= __get_user(tmp, &sc->seg);		\
-	  regs->seg = tmp|3; }
-
-#define GET_SEG(seg)					\
-	{ unsigned short tmp;				\
-	  err |= __get_user(tmp, &sc->seg);		\
-	  loadsegment(seg, tmp); }
-
 	GET_SEG(gs);
 	COPY_SEG(fs);
 	COPY_SEG(es);
@@ -149,38 +158,15 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 	COPY_SEG_STRICT(cs);
 	COPY_SEG_STRICT(ss);
 
-	{
-		unsigned int tmpflags;
-
-		err |= __get_user(tmpflags, &sc->flags);
-		regs->flags = (regs->flags & ~FIX_EFLAGS) |
-						(tmpflags & FIX_EFLAGS);
-		regs->orig_ax = -1;	/* disable syscall checks */
-	}
+	err |= __get_user(tmpflags, &sc->flags);
+	regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+	regs->orig_ax = -1;		/* disable syscall checks */
 
-	{
-		struct _fpstate __user *buf;
-
-		err |= __get_user(buf, &sc->fpstate);
-		if (buf) {
-			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
-				goto badframe;
-			err |= restore_i387(buf);
-		} else {
-			struct task_struct *me = current;
-
-			if (used_math()) {
-				clear_fpu(me);
-				clear_used_math();
-			}
-		}
-	}
+	err |= __get_user(buf, &sc->fpstate);
+	err |= restore_i387_xstate(buf);
 
 	err |= __get_user(*pax, &sc->ax);
 	return err;
-
-badframe:
-	return 1;
 }
185 171
186asmlinkage unsigned long sys_sigreturn(unsigned long __unused) 172asmlinkage unsigned long sys_sigreturn(unsigned long __unused)
@@ -226,9 +212,8 @@ badframe:
226 return 0; 212 return 0;
227} 213}
228 214
229asmlinkage int sys_rt_sigreturn(unsigned long __unused) 215static long do_rt_sigreturn(struct pt_regs *regs)
230{ 216{
231 struct pt_regs *regs = (struct pt_regs *)&__unused;
232 struct rt_sigframe __user *frame; 217 struct rt_sigframe __user *frame;
233 unsigned long ax; 218 unsigned long ax;
234 sigset_t set; 219 sigset_t set;
@@ -254,15 +239,22 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused)
254 return ax; 239 return ax;
255 240
256badframe: 241badframe:
257 force_sig(SIGSEGV, current); 242 signal_fault(regs, frame, "rt_sigreturn");
258 return 0; 243 return 0;
259} 244}
260 245
246asmlinkage int sys_rt_sigreturn(unsigned long __unused)
247{
248 struct pt_regs *regs = (struct pt_regs *)&__unused;
249
250 return do_rt_sigreturn(regs);
251}
252
261/* 253/*
262 * Set up a signal frame. 254 * Set up a signal frame.
263 */ 255 */
264static int 256static int
265setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, 257setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
266 struct pt_regs *regs, unsigned long mask) 258 struct pt_regs *regs, unsigned long mask)
267{ 259{
268 int tmp, err = 0; 260 int tmp, err = 0;
@@ -289,7 +281,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
289 err |= __put_user(regs->sp, &sc->sp_at_signal); 281 err |= __put_user(regs->sp, &sc->sp_at_signal);
290 err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); 282 err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
291 283
292 tmp = save_i387(fpstate); 284 tmp = save_i387_xstate(fpstate);
293 if (tmp < 0) 285 if (tmp < 0)
294 err = 1; 286 err = 1;
295 else 287 else
@@ -306,7 +298,8 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
306 * Determine which stack to use.. 298 * Determine which stack to use..
307 */ 299 */
308static inline void __user * 300static inline void __user *
309get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) 301get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
302 void **fpstate)
310{ 303{
311 unsigned long sp; 304 unsigned long sp;
312 305
@@ -332,6 +325,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
332 sp = (unsigned long) ka->sa.sa_restorer; 325 sp = (unsigned long) ka->sa.sa_restorer;
333 } 326 }
334 327
328 if (used_math()) {
329 sp = sp - sig_xstate_size;
330 *fpstate = (struct _fpstate *) sp;
331 }
332
335 sp -= frame_size; 333 sp -= frame_size;
336 /* 334 /*
337 * Align the stack pointer according to the i386 ABI, 335 * Align the stack pointer according to the i386 ABI,
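get_sigframe() now carves the FP/xstate save area out of the user stack first, then places the frame below it and applies the i386 ABI alignment so that (sp + 4) is 16-byte aligned at handler entry. A standalone sketch of that layout arithmetic (the sizes are made up for illustration; the alignment line follows the comment in the hunk):

#include <stdio.h>

int main(void)
{
	unsigned long sp = 0xbfff0000UL;
	unsigned long sig_xstate_size = 512 + 64;	/* fxsave + header, say */
	unsigned long frame_size = 0x2e0;		/* made-up sizeof(frame) */

	sp -= sig_xstate_size;		/* FP/xstate buffer, carved first */
	printf("fpstate at %#lx\n", sp);

	sp -= frame_size;		/* the sigframe itself */
	sp = ((sp + 4) & -16ul) - 4;	/* i386 ABI: (sp + 4) % 16 == 0 */
	printf("frame   at %#lx\n", sp);
	return 0;
}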
@@ -343,38 +341,29 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
343} 341}
344 342
345static int 343static int
346setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, 344__setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
347 struct pt_regs *regs) 345 struct pt_regs *regs)
348{ 346{
349 struct sigframe __user *frame; 347 struct sigframe __user *frame;
350 void __user *restorer; 348 void __user *restorer;
351 int err = 0; 349 int err = 0;
352 int usig; 350 void __user *fpstate = NULL;
353 351
354 frame = get_sigframe(ka, regs, sizeof(*frame)); 352 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
355 353
356 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 354 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
357 goto give_sigsegv; 355 return -EFAULT;
358 356
359 usig = current_thread_info()->exec_domain 357 if (__put_user(sig, &frame->sig))
360 && current_thread_info()->exec_domain->signal_invmap 358 return -EFAULT;
361 && sig < 32
362 ? current_thread_info()->exec_domain->signal_invmap[sig]
363 : sig;
364 359
365 err = __put_user(usig, &frame->sig); 360 if (setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
366 if (err) 361 return -EFAULT;
367 goto give_sigsegv;
368
369 err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]);
370 if (err)
371 goto give_sigsegv;
372 362
373 if (_NSIG_WORDS > 1) { 363 if (_NSIG_WORDS > 1) {
374 err = __copy_to_user(&frame->extramask, &set->sig[1], 364 if (__copy_to_user(&frame->extramask, &set->sig[1],
375 sizeof(frame->extramask)); 365 sizeof(frame->extramask)))
376 if (err) 366 return -EFAULT;
377 goto give_sigsegv;
378 } 367 }
379 368
380 if (current->mm->context.vdso) 369 if (current->mm->context.vdso)
@@ -399,7 +388,7 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
399 err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); 388 err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
400 389
401 if (err) 390 if (err)
402 goto give_sigsegv; 391 return -EFAULT;
403 392
404 /* Set up registers for signal handler */ 393 /* Set up registers for signal handler */
405 regs->sp = (unsigned long)frame; 394 regs->sp = (unsigned long)frame;
@@ -414,50 +403,43 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
414 regs->cs = __USER_CS; 403 regs->cs = __USER_CS;
415 404
416 return 0; 405 return 0;
417
418give_sigsegv:
419 force_sigsegv(sig, current);
420 return -EFAULT;
421} 406}
422 407
423static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 408static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
424 sigset_t *set, struct pt_regs *regs) 409 sigset_t *set, struct pt_regs *regs)
425{ 410{
426 struct rt_sigframe __user *frame; 411 struct rt_sigframe __user *frame;
427 void __user *restorer; 412 void __user *restorer;
428 int err = 0; 413 int err = 0;
429 int usig; 414 void __user *fpstate = NULL;
430 415
431 frame = get_sigframe(ka, regs, sizeof(*frame)); 416 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
432 417
433 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 418 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
434 goto give_sigsegv; 419 return -EFAULT;
435 420
436 usig = current_thread_info()->exec_domain 421 err |= __put_user(sig, &frame->sig);
437 && current_thread_info()->exec_domain->signal_invmap
438 && sig < 32
439 ? current_thread_info()->exec_domain->signal_invmap[sig]
440 : sig;
441
442 err |= __put_user(usig, &frame->sig);
443 err |= __put_user(&frame->info, &frame->pinfo); 422 err |= __put_user(&frame->info, &frame->pinfo);
444 err |= __put_user(&frame->uc, &frame->puc); 423 err |= __put_user(&frame->uc, &frame->puc);
445 err |= copy_siginfo_to_user(&frame->info, info); 424 err |= copy_siginfo_to_user(&frame->info, info);
446 if (err) 425 if (err)
447 goto give_sigsegv; 426 return -EFAULT;
448 427
449 /* Create the ucontext. */ 428 /* Create the ucontext. */
450 err |= __put_user(0, &frame->uc.uc_flags); 429 if (cpu_has_xsave)
430 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
431 else
432 err |= __put_user(0, &frame->uc.uc_flags);
451 err |= __put_user(0, &frame->uc.uc_link); 433 err |= __put_user(0, &frame->uc.uc_link);
452 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 434 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
453 err |= __put_user(sas_ss_flags(regs->sp), 435 err |= __put_user(sas_ss_flags(regs->sp),
454 &frame->uc.uc_stack.ss_flags); 436 &frame->uc.uc_stack.ss_flags);
455 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); 437 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
456 err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, 438 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
457 regs, set->sig[0]); 439 regs, set->sig[0]);
458 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 440 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
459 if (err) 441 if (err)
460 goto give_sigsegv; 442 return -EFAULT;
461 443
462 /* Set up to return from userspace. */ 444 /* Set up to return from userspace. */
463 restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); 445 restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
@@ -477,12 +459,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
477 err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); 459 err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
478 460
479 if (err) 461 if (err)
480 goto give_sigsegv; 462 return -EFAULT;
481 463
482 /* Set up registers for signal handler */ 464 /* Set up registers for signal handler */
483 regs->sp = (unsigned long)frame; 465 regs->sp = (unsigned long)frame;
484 regs->ip = (unsigned long)ka->sa.sa_handler; 466 regs->ip = (unsigned long)ka->sa.sa_handler;
485 regs->ax = (unsigned long)usig; 467 regs->ax = (unsigned long)sig;
486 regs->dx = (unsigned long)&frame->info; 468 regs->dx = (unsigned long)&frame->info;
487 regs->cx = (unsigned long)&frame->uc; 469 regs->cx = (unsigned long)&frame->uc;
488 470
@@ -492,15 +474,48 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
492 regs->cs = __USER_CS; 474 regs->cs = __USER_CS;
493 475
494 return 0; 476 return 0;
495
496give_sigsegv:
497 force_sigsegv(sig, current);
498 return -EFAULT;
499} 477}
500 478
501/* 479/*
502 * OK, we're invoking a handler: 480 * OK, we're invoking a handler:
503 */ 481 */
482static int signr_convert(int sig)
483{
484 struct thread_info *info = current_thread_info();
485
486 if (info->exec_domain && info->exec_domain->signal_invmap && sig < 32)
487 return info->exec_domain->signal_invmap[sig];
488 return sig;
489}
490
491#define is_ia32 1
492#define ia32_setup_frame __setup_frame
493#define ia32_setup_rt_frame __setup_rt_frame
494
495static int
496setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
497 sigset_t *set, struct pt_regs *regs)
498{
499 int usig = signr_convert(sig);
500 int ret;
501
502 /* Set up the stack frame */
503 if (is_ia32) {
504 if (ka->sa.sa_flags & SA_SIGINFO)
505 ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
506 else
507 ret = ia32_setup_frame(usig, ka, set, regs);
508 } else
509 ret = __setup_rt_frame(sig, ka, info, set, regs);
510
511 if (ret) {
512 force_sigsegv(sig, current);
513 return -EFAULT;
514 }
515
516 return ret;
517}
518
504static int 519static int
505handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 520handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
506 sigset_t *oldset, struct pt_regs *regs) 521 sigset_t *oldset, struct pt_regs *regs)
@@ -508,9 +523,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
508 int ret; 523 int ret;
509 524
510 /* Are we from a system call? */ 525 /* Are we from a system call? */
511 if ((long)regs->orig_ax >= 0) { 526 if (syscall_get_nr(current, regs) >= 0) {
512 /* If so, check system call restarting.. */ 527 /* If so, check system call restarting.. */
513 switch (regs->ax) { 528 switch (syscall_get_error(current, regs)) {
514 case -ERESTART_RESTARTBLOCK: 529 case -ERESTART_RESTARTBLOCK:
515 case -ERESTARTNOHAND: 530 case -ERESTARTNOHAND:
516 regs->ax = -EINTR; 531 regs->ax = -EINTR;
@@ -537,15 +552,20 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
537 likely(test_and_clear_thread_flag(TIF_FORCED_TF))) 552 likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
538 regs->flags &= ~X86_EFLAGS_TF; 553 regs->flags &= ~X86_EFLAGS_TF;
539 554
540 /* Set up the stack frame */ 555 ret = setup_rt_frame(sig, ka, info, oldset, regs);
541 if (ka->sa.sa_flags & SA_SIGINFO)
542 ret = setup_rt_frame(sig, ka, info, oldset, regs);
543 else
544 ret = setup_frame(sig, ka, oldset, regs);
545 556
546 if (ret) 557 if (ret)
547 return ret; 558 return ret;
548 559
560#ifdef CONFIG_X86_64
561 /*
562 * This has nothing to do with segment registers,
563 * despite the name. This magic affects uaccess.h
564 * macros' behavior. Reset it to the normal setting.
565 */
566 set_fs(USER_DS);
567#endif
568
549 /* 569 /*
550 * Clear the direction flag as per the ABI for function entry. 570 * Clear the direction flag as per the ABI for function entry.
551 */ 571 */
@@ -558,8 +578,6 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
558 * handler too. 578 * handler too.
559 */ 579 */
560 regs->flags &= ~X86_EFLAGS_TF; 580 regs->flags &= ~X86_EFLAGS_TF;
561 if (test_thread_flag(TIF_SINGLESTEP))
562 ptrace_notify(SIGTRAP);
563 581
564 spin_lock_irq(&current->sighand->siglock); 582 spin_lock_irq(&current->sighand->siglock);
565 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 583 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
@@ -568,9 +586,13 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
568 recalc_sigpending(); 586 recalc_sigpending();
569 spin_unlock_irq(&current->sighand->siglock); 587 spin_unlock_irq(&current->sighand->siglock);
570 588
589 tracehook_signal_handler(sig, info, ka, regs,
590 test_thread_flag(TIF_SINGLESTEP));
591
571 return 0; 592 return 0;
572} 593}
573 594
595#define NR_restart_syscall __NR_restart_syscall
574/* 596/*
575 * Note that 'init' is a special process: it doesn't get signals it doesn't 597 * Note that 'init' is a special process: it doesn't get signals it doesn't
576 * want to handle. Thus you cannot kill init even with a SIGKILL even by 598 * want to handle. Thus you cannot kill init even with a SIGKILL even by
@@ -623,9 +645,9 @@ static void do_signal(struct pt_regs *regs)
623 } 645 }
624 646
625 /* Did we come from a system call? */ 647 /* Did we come from a system call? */
626 if ((long)regs->orig_ax >= 0) { 648 if (syscall_get_nr(current, regs) >= 0) {
627 /* Restart the system call - no handlers present */ 649 /* Restart the system call - no handlers present */
628 switch (regs->ax) { 650 switch (syscall_get_error(current, regs)) {
629 case -ERESTARTNOHAND: 651 case -ERESTARTNOHAND:
630 case -ERESTARTSYS: 652 case -ERESTARTSYS:
631 case -ERESTARTNOINTR: 653 case -ERESTARTNOINTR:
@@ -634,7 +656,7 @@ static void do_signal(struct pt_regs *regs)
634 break; 656 break;
635 657
636 case -ERESTART_RESTARTBLOCK: 658 case -ERESTART_RESTARTBLOCK:
637 regs->ax = __NR_restart_syscall; 659 regs->ax = NR_restart_syscall;
638 regs->ip -= 2; 660 regs->ip -= 2;
639 break; 661 break;
640 } 662 }
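The restart cases work because both "int $0x80" (cd 80) and "syscall" (0f 05) are two-byte instructions: backing the saved ip up by two re-executes the system call on return to user mode. A toy model of the no-handler policy above (the ERESTART* values are the real kernel-internal codes, never returned to user space):

#include <stdio.h>

#define ERESTARTSYS           512
#define ERESTARTNOINTR        513
#define ERESTARTNOHAND        514
#define ERESTART_RESTARTBLOCK 516

static const char *restart_action(long ax)
{
	switch (ax) {
	case -ERESTARTNOHAND:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		return "ax = orig_ax, ip -= 2 (re-issue the syscall)";
	case -ERESTART_RESTARTBLOCK:
		return "ax = NR_restart_syscall, ip -= 2";
	default:
		return "no restart";
	}
}

int main(void)
{
	printf("%s\n", restart_action(-ERESTARTNOHAND));
	printf("%s\n", restart_action(-ERESTART_RESTARTBLOCK));
	return 0;
}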
@@ -657,9 +679,38 @@ static void do_signal(struct pt_regs *regs)
657void 679void
658do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) 680do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
659{ 681{
682#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
683 /* notify userspace of pending MCEs */
684 if (thread_info_flags & _TIF_MCE_NOTIFY)
685 mce_notify_user();
686#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
687
660 /* deal with pending signal delivery */ 688 /* deal with pending signal delivery */
661 if (thread_info_flags & _TIF_SIGPENDING) 689 if (thread_info_flags & _TIF_SIGPENDING)
662 do_signal(regs); 690 do_signal(regs);
663 691
692 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
693 clear_thread_flag(TIF_NOTIFY_RESUME);
694 tracehook_notify_resume(regs);
695 }
696
697#ifdef CONFIG_X86_32
664 clear_thread_flag(TIF_IRET); 698 clear_thread_flag(TIF_IRET);
699#endif /* CONFIG_X86_32 */
700}
701
702void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
703{
704 struct task_struct *me = current;
705
706 if (show_unhandled_signals && printk_ratelimit()) {
707 printk(KERN_INFO
708 "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
709 me->comm, me->pid, where, frame,
710 regs->ip, regs->sp, regs->orig_ax);
711 print_vma_addr(" in ", regs->ip);
712 printk(KERN_CONT "\n");
713 }
714
715 force_sig(SIGSEGV, me);
665} 716}
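signal_fault() gates its diagnostic behind show_unhandled_signals and printk_ratelimit(), so a process looping on a bad frame cannot flood the log. A toy sketch of that gating pattern (the kernel's limiter is jiffies-based with a tunable interval and burst; this one allows at most BURST messages per second):

#include <stdio.h>
#include <time.h>

#define BURST 10

static int ratelimit(void)
{
	static time_t last;
	static int tokens = BURST;
	time_t now = time(NULL);

	if (now != last) {		/* new second: refill the burst */
		last = now;
		tokens = BURST;
	}
	if (tokens <= 0)
		return 0;
	tokens--;
	return 1;
}

int main(void)
{
	int i, printed = 0;

	for (i = 0; i < 1000; i++)
		if (ratelimit())
			printed++;
	printf("printed %d of 1000\n", printed);	/* about BURST */
	return 0;
}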
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index ca316b5b742c..a5c9627f4db9 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -15,17 +15,21 @@
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/wait.h> 16#include <linux/wait.h>
17#include <linux/ptrace.h> 17#include <linux/ptrace.h>
18#include <linux/tracehook.h>
18#include <linux/unistd.h> 19#include <linux/unistd.h>
19#include <linux/stddef.h> 20#include <linux/stddef.h>
20#include <linux/personality.h> 21#include <linux/personality.h>
21#include <linux/compiler.h> 22#include <linux/compiler.h>
23#include <linux/uaccess.h>
24
22#include <asm/processor.h> 25#include <asm/processor.h>
23#include <asm/ucontext.h> 26#include <asm/ucontext.h>
24#include <asm/uaccess.h>
25#include <asm/i387.h> 27#include <asm/i387.h>
26#include <asm/proto.h> 28#include <asm/proto.h>
27#include <asm/ia32_unistd.h> 29#include <asm/ia32_unistd.h>
28#include <asm/mce.h> 30#include <asm/mce.h>
31#include <asm/syscall.h>
32#include <asm/syscalls.h>
29#include "sigframe.h" 33#include "sigframe.h"
30 34
31#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 35#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -41,11 +45,6 @@
41# define FIX_EFLAGS __FIX_EFLAGS 45# define FIX_EFLAGS __FIX_EFLAGS
42#endif 46#endif
43 47
44int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
45 sigset_t *set, struct pt_regs * regs);
46int ia32_setup_frame(int sig, struct k_sigaction *ka,
47 sigset_t *set, struct pt_regs * regs);
48
49asmlinkage long 48asmlinkage long
50sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 49sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
51 struct pt_regs *regs) 50 struct pt_regs *regs)
@@ -53,67 +52,14 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 	return do_sigaltstack(uss, uoss, regs->sp);
 }
 
-/*
- * Signal frame handlers.
- */
-
-static inline int save_i387(struct _fpstate __user *buf)
-{
-	struct task_struct *tsk = current;
-	int err = 0;
-
-	BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
-			sizeof(tsk->thread.xstate->fxsave));
-
-	if ((unsigned long)buf % 16)
-		printk("save_i387: bad fpstate %p\n", buf);
-
-	if (!used_math())
-		return 0;
-	clear_used_math(); /* trigger finit */
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
-		err = save_i387_checking((struct i387_fxsave_struct __user *)
-					 buf);
-		if (err)
-			return err;
-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
-		stts();
-	} else {
-		if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
-				   sizeof(struct i387_fxsave_struct)))
-			return -1;
-	}
-	return 1;
-}
-
-/*
- * This restores directly out of user space. Exceptions are handled.
- */
-static inline int restore_i387(struct _fpstate __user *buf)
-{
-	struct task_struct *tsk = current;
-	int err;
-
-	if (!used_math()) {
-		err = init_fpu(tsk);
-		if (err)
-			return err;
-	}
-
-	if (!(task_thread_info(current)->status & TS_USEDFPU)) {
-		clts();
-		task_thread_info(current)->status |= TS_USEDFPU;
-	}
-	err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
-	if (unlikely(err)) {
-		/*
-		 * Encountered an error while doing the restore from the
-		 * user buffer, clear the fpu state.
-		 */
-		clear_fpu(tsk);
-		clear_used_math();
-	}
-	return err;
-}
+#define COPY(x) {			\
+	err |= __get_user(regs->x, &sc->x); \
+}
+
+#define COPY_SEG_STRICT(seg) {		\
+	unsigned short tmp;		\
+	err |= __get_user(tmp, &sc->seg); \
+	regs->seg = tmp | 3;		\
+}
 
119/* 65/*
@@ -123,13 +69,13 @@ static int
 restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 		   unsigned long *pax)
 {
+	void __user *buf;
+	unsigned int tmpflags;
 	unsigned int err = 0;
 
 	/* Always make any pending restarted system calls return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-#define COPY(x)		err |= __get_user(regs->x, &sc->x)
-
 	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
 	COPY(dx); COPY(cx); COPY(ip);
 	COPY(r8);
@@ -144,48 +90,24 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 	/* Kernel saves and restores only the CS segment register on signals,
 	 * which is the bare minimum needed to allow mixed 32/64-bit code.
 	 * App's signal handler can save/restore other segments if needed. */
-	{
-		unsigned cs;
-		err |= __get_user(cs, &sc->cs);
-		regs->cs = cs | 3;	/* Force into user mode */
-	}
+	COPY_SEG_STRICT(cs);
 
-	{
-		unsigned int tmpflags;
-		err |= __get_user(tmpflags, &sc->flags);
-		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-		regs->orig_ax = -1;	/* disable syscall checks */
-	}
+	err |= __get_user(tmpflags, &sc->flags);
+	regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+	regs->orig_ax = -1;	/* disable syscall checks */
 
-	{
-		struct _fpstate __user *buf;
-		err |= __get_user(buf, &sc->fpstate);
-
-		if (buf) {
-			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
-				goto badframe;
-			err |= restore_i387(buf);
-		} else {
-			struct task_struct *me = current;
-			if (used_math()) {
-				clear_fpu(me);
-				clear_used_math();
-			}
-		}
-	}
+	err |= __get_user(buf, &sc->fpstate);
+	err |= restore_i387_xstate(buf);
 
 	err |= __get_user(*pax, &sc->ax);
 	return err;
-
-badframe:
-	return 1;
 }
183 105
184asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) 106static long do_rt_sigreturn(struct pt_regs *regs)
185{ 107{
186 struct rt_sigframe __user *frame; 108 struct rt_sigframe __user *frame;
187 sigset_t set;
188 unsigned long ax; 109 unsigned long ax;
110 sigset_t set;
189 111
190 frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); 112 frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
191 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 113 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -198,7 +120,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
198 current->blocked = set; 120 current->blocked = set;
199 recalc_sigpending(); 121 recalc_sigpending();
200 spin_unlock_irq(&current->sighand->siglock); 122 spin_unlock_irq(&current->sighand->siglock);
201 123
202 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) 124 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
203 goto badframe; 125 goto badframe;
204 126
@@ -208,16 +130,22 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
208 return ax; 130 return ax;
209 131
210badframe: 132badframe:
211 signal_fault(regs,frame,"sigreturn"); 133 signal_fault(regs, frame, "rt_sigreturn");
212 return 0; 134 return 0;
213} 135}
136
137asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
138{
139 return do_rt_sigreturn(regs);
140}
214 141
215/* 142/*
216 * Set up a signal frame. 143 * Set up a signal frame.
217 */ 144 */
218 145
219static inline int 146static inline int
220setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me) 147setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
148 unsigned long mask, struct task_struct *me)
221{ 149{
222 int err = 0; 150 int err = 0;
223 151
@@ -269,41 +197,40 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
269 sp = current->sas_ss_sp + current->sas_ss_size; 197 sp = current->sas_ss_sp + current->sas_ss_size;
270 } 198 }
271 199
272 return (void __user *)round_down(sp - size, 16); 200 return (void __user *)round_down(sp - size, 64);
273} 201}
274 202
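get_stack() now rounds the stack down to 64 bytes rather than 16 because the XSAVE area written by save_i387_xstate() must be 64-byte aligned. A small sketch of the same power-of-two alignment math (round_down() mirrors the kernel macro; the addresses and the 512-byte size are made up):

#include <stdint.h>
#include <stdio.h>

#define round_down(x, y) ((x) & ~((uintptr_t)(y) - 1))

int main(void)
{
    uintptr_t sp = 0x7ffd1234;
    /* carve the (fake, 512-byte) fpstate out below sp, 64-byte aligned */
    uintptr_t fp = round_down(sp - 512, 64);

    printf("fp=%#lx mod64=%lu\n", (unsigned long)fp, (unsigned long)(fp % 64));
    return 0;
}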
275static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 203static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
276 sigset_t *set, struct pt_regs * regs) 204 sigset_t *set, struct pt_regs *regs)
277{ 205{
278 struct rt_sigframe __user *frame; 206 struct rt_sigframe __user *frame;
279 struct _fpstate __user *fp = NULL; 207 void __user *fp = NULL;
280 int err = 0; 208 int err = 0;
281 struct task_struct *me = current; 209 struct task_struct *me = current;
282 210
283 if (used_math()) { 211 if (used_math()) {
284 fp = get_stack(ka, regs, sizeof(struct _fpstate)); 212 fp = get_stack(ka, regs, sig_xstate_size);
285 frame = (void __user *)round_down( 213 frame = (void __user *)round_down(
286 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; 214 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
287 215
288 if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) 216 if (save_i387_xstate(fp) < 0)
289 goto give_sigsegv; 217 return -EFAULT;
290
291 if (save_i387(fp) < 0)
292 err |= -1;
293 } else 218 } else
294 frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; 219 frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
295 220
296 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 221 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
297 goto give_sigsegv; 222 return -EFAULT;
298 223
299 if (ka->sa.sa_flags & SA_SIGINFO) { 224 if (ka->sa.sa_flags & SA_SIGINFO) {
300 err |= copy_siginfo_to_user(&frame->info, info); 225 if (copy_siginfo_to_user(&frame->info, info))
301 if (err) 226 return -EFAULT;
302 goto give_sigsegv;
303 } 227 }
304 228
305 /* Create the ucontext. */ 229 /* Create the ucontext. */
306 err |= __put_user(0, &frame->uc.uc_flags); 230 if (cpu_has_xsave)
231 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
232 else
233 err |= __put_user(0, &frame->uc.uc_flags);
307 err |= __put_user(0, &frame->uc.uc_link); 234 err |= __put_user(0, &frame->uc.uc_link);
308 err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 235 err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
309 err |= __put_user(sas_ss_flags(regs->sp), 236 err |= __put_user(sas_ss_flags(regs->sp),
@@ -311,9 +238,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
311 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); 238 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
312 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); 239 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
313 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); 240 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
314 if (sizeof(*set) == 16) { 241 if (sizeof(*set) == 16) {
315 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); 242 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
316 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); 243 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
317 } else 244 } else
318 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 245 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
319 246
@@ -324,15 +251,15 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
324 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); 251 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
325 } else { 252 } else {
326 /* could use a vstub here */ 253 /* could use a vstub here */
327 goto give_sigsegv; 254 return -EFAULT;
328 } 255 }
329 256
330 if (err) 257 if (err)
331 goto give_sigsegv; 258 return -EFAULT;
332 259
333 /* Set up registers for signal handler */ 260 /* Set up registers for signal handler */
334 regs->di = sig; 261 regs->di = sig;
335 /* In case the signal handler was declared without prototypes */ 262 /* In case the signal handler was declared without prototypes */
336 regs->ax = 0; 263 regs->ax = 0;
337 264
338 /* This also works for non SA_SIGINFO handlers because they expect the 265 /* This also works for non SA_SIGINFO handlers because they expect the
@@ -348,44 +275,45 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
348 regs->cs = __USER_CS; 275 regs->cs = __USER_CS;
349 276
350 return 0; 277 return 0;
351
352give_sigsegv:
353 force_sigsegv(sig, current);
354 return -EFAULT;
355} 278}
356 279
357/* 280/*
358 * Return -1L or the syscall number that @regs is executing. 281 * OK, we're invoking a handler
359 */ 282 */
360static long current_syscall(struct pt_regs *regs) 283static int signr_convert(int sig)
361{ 284{
362 /* 285 return sig;
363 * We always sign-extend a -1 value being set here,
364 * so this is always either -1L or a syscall number.
365 */
366 return regs->orig_ax;
367} 286}
368 287
369/*
370 * Return a value that is -EFOO if the system call in @regs->orig_ax
371 * returned an error. This only works for @regs from @current.
372 */
373static long current_syscall_ret(struct pt_regs *regs)
374{
375#ifdef CONFIG_IA32_EMULATION 288#ifdef CONFIG_IA32_EMULATION
376 if (test_thread_flag(TIF_IA32)) 289#define is_ia32 test_thread_flag(TIF_IA32)
377 /* 290#else
378 * Sign-extend the value so (int)-EFOO becomes (long)-EFOO 291#define is_ia32 0
379 * and will match correctly in comparisons.
380 */
381 return (int) regs->ax;
382#endif 292#endif
383 return regs->ax;
384}
385 293
386/* 294static int
387 * OK, we're invoking a handler 295setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
388 */ 296 sigset_t *set, struct pt_regs *regs)
297{
298 int usig = signr_convert(sig);
299 int ret;
300
301 /* Set up the stack frame */
302 if (is_ia32) {
303 if (ka->sa.sa_flags & SA_SIGINFO)
304 ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
305 else
306 ret = ia32_setup_frame(usig, ka, set, regs);
307 } else
308 ret = __setup_rt_frame(sig, ka, info, set, regs);
309
310 if (ret) {
311 force_sigsegv(sig, current);
312 return -EFAULT;
313 }
314
315 return ret;
316}
389 317
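With the give_sigsegv label gone, every frame-setup path simply returns -EFAULT and setup_rt_frame() issues the single force_sigsegv(). A user-space sketch of that centralized failure path (hypothetical names throughout, with raise() standing in for force_sigsegv()):

#include <signal.h>
#include <stdio.h>

static int setup_native_frame(void) { return -1; /* pretend a copyout faulted */ }
static int setup_compat_frame(void) { return 0; }

static int setup_frame_dispatch(int compat)
{
    int ret = compat ? setup_compat_frame() : setup_native_frame();

    if (ret) {
        raise(SIGSEGV);     /* kernel: force_sigsegv(sig, current) */
        return -14;         /* -EFAULT */
    }
    return 0;
}

int main(void)
{
    signal(SIGSEGV, SIG_IGN);   /* keep the demo alive */
    printf("native: %d\n", setup_frame_dispatch(0));
    printf("compat: %d\n", setup_frame_dispatch(1));
    return 0;
}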
390static int 318static int
391handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 319handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
@@ -394,9 +322,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
394 int ret; 322 int ret;
395 323
396 /* Are we from a system call? */ 324 /* Are we from a system call? */
397 if (current_syscall(regs) >= 0) { 325 if (syscall_get_nr(current, regs) >= 0) {
398 /* If so, check system call restarting... */ 326 /* If so, check system call restarting... */
399 switch (current_syscall_ret(regs)) { 327 switch (syscall_get_error(current, regs)) {
400 case -ERESTART_RESTARTBLOCK: 328 case -ERESTART_RESTARTBLOCK:
401 case -ERESTARTNOHAND: 329 case -ERESTARTNOHAND:
402 regs->ax = -EINTR; 330 regs->ax = -EINTR;
@@ -423,50 +351,48 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
423 likely(test_and_clear_thread_flag(TIF_FORCED_TF))) 351 likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
424 regs->flags &= ~X86_EFLAGS_TF; 352 regs->flags &= ~X86_EFLAGS_TF;
425 353
426#ifdef CONFIG_IA32_EMULATION
427 if (test_thread_flag(TIF_IA32)) {
428 if (ka->sa.sa_flags & SA_SIGINFO)
429 ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
430 else
431 ret = ia32_setup_frame(sig, ka, oldset, regs);
432 } else
433#endif
434 ret = setup_rt_frame(sig, ka, info, oldset, regs); 354 ret = setup_rt_frame(sig, ka, info, oldset, regs);
435 355
436 if (ret == 0) { 356 if (ret)
437 /* 357 return ret;
438 * This has nothing to do with segment registers,
439 * despite the name. This magic affects uaccess.h
440 * macros' behavior. Reset it to the normal setting.
441 */
442 set_fs(USER_DS);
443 358
444 /* 359#ifdef CONFIG_X86_64
445 * Clear the direction flag as per the ABI for function entry. 360 /*
446 */ 361 * This has nothing to do with segment registers,
447 regs->flags &= ~X86_EFLAGS_DF; 362 * despite the name. This magic affects uaccess.h
363 * macros' behavior. Reset it to the normal setting.
364 */
365 set_fs(USER_DS);
366#endif
448 367
449 /* 368 /*
450 * Clear TF when entering the signal handler, but 369 * Clear the direction flag as per the ABI for function entry.
451 * notify any tracer that was single-stepping it. 370 */
452 * The tracer may want to single-step inside the 371 regs->flags &= ~X86_EFLAGS_DF;
453 * handler too.
454 */
455 regs->flags &= ~X86_EFLAGS_TF;
456 if (test_thread_flag(TIF_SINGLESTEP))
457 ptrace_notify(SIGTRAP);
458
459 spin_lock_irq(&current->sighand->siglock);
460 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
461 if (!(ka->sa.sa_flags & SA_NODEFER))
462 sigaddset(&current->blocked,sig);
463 recalc_sigpending();
464 spin_unlock_irq(&current->sighand->siglock);
465 }
466 372
467 return ret; 373 /*
374 * Clear TF when entering the signal handler, but
375 * notify any tracer that was single-stepping it.
376 * The tracer may want to single-step inside the
377 * handler too.
378 */
379 regs->flags &= ~X86_EFLAGS_TF;
380
381 spin_lock_irq(&current->sighand->siglock);
382 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
383 if (!(ka->sa.sa_flags & SA_NODEFER))
384 sigaddset(&current->blocked, sig);
385 recalc_sigpending();
386 spin_unlock_irq(&current->sighand->siglock);
387
388 tracehook_signal_handler(sig, info, ka, regs,
389 test_thread_flag(TIF_SINGLESTEP));
390
391 return 0;
468} 392}
469 393
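handle_signal() now unconditionally clears DF (per the ABI for function entry) and TF (so the handler itself is not single-stepped) before returning. A sketch of the mask arithmetic, using the real X86_EFLAGS_* bit values:

#include <stdio.h>

#define X86_EFLAGS_TF 0x00000100UL  /* trap (single-step) flag */
#define X86_EFLAGS_DF 0x00000400UL  /* direction flag */

int main(void)
{
    unsigned long flags = 0x546UL;  /* TF and DF both set */

    flags &= ~X86_EFLAGS_DF;        /* ABI: DF clear on function entry */
    flags &= ~X86_EFLAGS_TF;        /* don't single-step the handler */
    printf("flags=%#lx\n", flags);
    return 0;
}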
394#define NR_restart_syscall \
395 test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
470/* 396/*
471 * Note that 'init' is a special process: it doesn't get signals it doesn't 397 * Note that 'init' is a special process: it doesn't get signals it doesn't
472 * want to handle. Thus you cannot kill init even with a SIGKILL even by 398 * want to handle. Thus you cannot kill init even with a SIGKILL even by
@@ -496,7 +422,8 @@ static void do_signal(struct pt_regs *regs)
496 422
497 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 423 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
498 if (signr > 0) { 424 if (signr > 0) {
499 /* Re-enable any watchpoints before delivering the 425 /*
426 * Re-enable any watchpoints before delivering the
500 * signal to user space. The processor register will 427 * signal to user space. The processor register will
501 * have been cleared if the watchpoint triggered 428 * have been cleared if the watchpoint triggered
502 * inside the kernel. 429 * inside the kernel.
@@ -504,7 +431,7 @@ static void do_signal(struct pt_regs *regs)
504 if (current->thread.debugreg7) 431 if (current->thread.debugreg7)
505 set_debugreg(current->thread.debugreg7, 7); 432 set_debugreg(current->thread.debugreg7, 7);
506 433
507 /* Whee! Actually deliver the signal. */ 434 /* Whee! Actually deliver the signal. */
508 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { 435 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
509 /* 436 /*
510 * A signal was successfully delivered; the saved 437 * A signal was successfully delivered; the saved
@@ -518,19 +445,18 @@ static void do_signal(struct pt_regs *regs)
518 } 445 }
519 446
520 /* Did we come from a system call? */ 447 /* Did we come from a system call? */
521 if (current_syscall(regs) >= 0) { 448 if (syscall_get_nr(current, regs) >= 0) {
522 /* Restart the system call - no handlers present */ 449 /* Restart the system call - no handlers present */
523 switch (current_syscall_ret(regs)) { 450 switch (syscall_get_error(current, regs)) {
524 case -ERESTARTNOHAND: 451 case -ERESTARTNOHAND:
525 case -ERESTARTSYS: 452 case -ERESTARTSYS:
526 case -ERESTARTNOINTR: 453 case -ERESTARTNOINTR:
527 regs->ax = regs->orig_ax; 454 regs->ax = regs->orig_ax;
528 regs->ip -= 2; 455 regs->ip -= 2;
529 break; 456 break;
457
530 case -ERESTART_RESTARTBLOCK: 458 case -ERESTART_RESTARTBLOCK:
531 regs->ax = test_thread_flag(TIF_IA32) ? 459 regs->ax = NR_restart_syscall;
532 __NR_ia32_restart_syscall :
533 __NR_restart_syscall;
534 regs->ip -= 2; 460 regs->ip -= 2;
535 break; 461 break;
536 } 462 }
@@ -546,29 +472,45 @@ static void do_signal(struct pt_regs *regs)
546 } 472 }
547} 473}
548 474
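The restart logic relies on both "int $0x80" and SYSCALL being two-byte instructions: rewinding ip by 2 and restoring orig_ax into ax re-executes the system call. A sketch with a fake register struct (addresses are made up):

#include <stdio.h>

struct fake_regs { unsigned long ip, ax, orig_ax; };

static void restart_syscall_demo(struct fake_regs *r)
{
    r->ax = r->orig_ax;     /* put the syscall number back in ax */
    r->ip -= 2;             /* back over the 2-byte syscall insn */
}

int main(void)
{
    struct fake_regs r = {
        .ip      = 0x400082,
        .ax      = (unsigned long)-512, /* -ERESTARTSYS */
        .orig_ax = 0,                   /* read(2) on x86-64 */
    };

    restart_syscall_demo(&r);
    printf("ip=%#lx ax=%lu\n", r.ip, r.ax);
    return 0;
}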
549void do_notify_resume(struct pt_regs *regs, void *unused, 475/*
550 __u32 thread_info_flags) 476 * notification of userspace execution resumption
477 * - triggered by the TIF_WORK_MASK flags
478 */
479void
480do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
551{ 481{
552#ifdef CONFIG_X86_MCE 482#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
553 /* notify userspace of pending MCEs */ 483 /* notify userspace of pending MCEs */
554 if (thread_info_flags & _TIF_MCE_NOTIFY) 484 if (thread_info_flags & _TIF_MCE_NOTIFY)
555 mce_notify_user(); 485 mce_notify_user();
556#endif /* CONFIG_X86_MCE */ 486#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
557 487
558 /* deal with pending signal delivery */ 488 /* deal with pending signal delivery */
559 if (thread_info_flags & _TIF_SIGPENDING) 489 if (thread_info_flags & _TIF_SIGPENDING)
560 do_signal(regs); 490 do_signal(regs);
491
492 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
493 clear_thread_flag(TIF_NOTIFY_RESUME);
494 tracehook_notify_resume(regs);
495 }
496
497#ifdef CONFIG_X86_32
498 clear_thread_flag(TIF_IRET);
499#endif /* CONFIG_X86_32 */
561} 500}
562 501
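do_notify_resume() treats thread_info_flags as a bitmask and handles each pending-work bit independently; the new _TIF_NOTIFY_RESUME case feeds tracehook_notify_resume(). A sketch of the dispatch (bit positions are illustrative, not the real <asm/thread_info.h> values):

#include <stdio.h>

/* illustrative bit positions, not the real <asm/thread_info.h> ones */
#define _TIF_NOTIFY_RESUME  (1u << 1)
#define _TIF_SIGPENDING     (1u << 2)

static void notify_resume_demo(unsigned int flags)
{
    if (flags & _TIF_SIGPENDING)
        puts("deliver pending signals");
    if (flags & _TIF_NOTIFY_RESUME)
        puts("clear the flag, call tracehook_notify_resume()");
}

int main(void)
{
    notify_resume_demo(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME);
    return 0;
}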
563void signal_fault(struct pt_regs *regs, void __user *frame, char *where) 502void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
564{ 503{
565 struct task_struct *me = current; 504 struct task_struct *me = current;
505
566 if (show_unhandled_signals && printk_ratelimit()) { 506 if (show_unhandled_signals && printk_ratelimit()) {
567 printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", 507 printk(KERN_INFO
568 me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax); 508 "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
509 me->comm, me->pid, where, frame,
510 regs->ip, regs->sp, regs->orig_ax);
569 print_vma_addr(" in ", regs->ip); 511 print_vma_addr(" in ", regs->ip);
570 printk("\n"); 512 printk(KERN_CONT "\n");
571 } 513 }
572 514
573 force_sig(SIGSEGV, me); 515 force_sig(SIGSEGV, me);
574} 516}
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 361b7a4c640c..18f9b19f5f8f 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -214,12 +214,16 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
214struct smp_ops smp_ops = { 214struct smp_ops smp_ops = {
215 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, 215 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
216 .smp_prepare_cpus = native_smp_prepare_cpus, 216 .smp_prepare_cpus = native_smp_prepare_cpus,
217 .cpu_up = native_cpu_up,
218 .smp_cpus_done = native_smp_cpus_done, 217 .smp_cpus_done = native_smp_cpus_done,
219 218
220 .smp_send_stop = native_smp_send_stop, 219 .smp_send_stop = native_smp_send_stop,
221 .smp_send_reschedule = native_smp_send_reschedule, 220 .smp_send_reschedule = native_smp_send_reschedule,
222 221
222 .cpu_up = native_cpu_up,
223 .cpu_die = native_cpu_die,
224 .cpu_disable = native_cpu_disable,
225 .play_dead = native_play_dead,
226
223 .send_call_func_ipi = native_send_call_func_ipi, 227 .send_call_func_ipi = native_send_call_func_ipi,
224 .send_call_func_single_ipi = native_send_call_func_single_ipi, 228 .send_call_func_single_ipi = native_send_call_func_single_ipi,
225}; 229};
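Moving cpu_up next to the new cpu_die/cpu_disable/play_dead entries groups all hotplug hooks in smp_ops, the usual ops-table pattern that lets a paravirt backend swap in its own callbacks at boot. A user-space sketch of the pattern (demo names only, not the kernel structure):

#include <stdio.h>

struct demo_smp_ops {
    int  (*cpu_up)(unsigned int cpu);
    void (*cpu_die)(unsigned int cpu);
};

static int native_cpu_up_demo(unsigned int cpu)
{
    printf("booting cpu %u\n", cpu);
    return 0;
}

static void native_cpu_die_demo(unsigned int cpu)
{
    printf("waiting for cpu %u to die\n", cpu);
}

static struct demo_smp_ops demo_ops = {
    .cpu_up  = native_cpu_up_demo,  /* a backend may override these */
    .cpu_die = native_cpu_die_demo,
};

int main(void)
{
    demo_ops.cpu_up(1);     /* callers always go through the table */
    demo_ops.cpu_die(1);
    return 0;
}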
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7985c5b3f916..7ece815ea637 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -52,6 +52,7 @@
52#include <asm/desc.h> 52#include <asm/desc.h>
53#include <asm/nmi.h> 53#include <asm/nmi.h>
54#include <asm/irq.h> 54#include <asm/irq.h>
55#include <asm/idle.h>
55#include <asm/smp.h> 56#include <asm/smp.h>
56#include <asm/trampoline.h> 57#include <asm/trampoline.h>
57#include <asm/cpu.h> 58#include <asm/cpu.h>
@@ -88,7 +89,7 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
88#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) 89#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
89#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) 90#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
90#else 91#else
91struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; 92static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
92#define get_idle_for_cpu(x) (idle_thread_array[(x)]) 93#define get_idle_for_cpu(x) (idle_thread_array[(x)])
93#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) 94#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
94#endif 95#endif
@@ -123,13 +124,12 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
123 124
124static atomic_t init_deasserted; 125static atomic_t init_deasserted;
125 126
126static int boot_cpu_logical_apicid;
127 127
128/* representing cpus for which sibling maps can be computed */ 128/* representing cpus for which sibling maps can be computed */
129static cpumask_t cpu_sibling_setup_map; 129static cpumask_t cpu_sibling_setup_map;
130 130
131/* Set if we find a B stepping CPU */ 131/* Set if we find a B stepping CPU */
132int __cpuinitdata smp_b_stepping; 132static int __cpuinitdata smp_b_stepping;
133 133
134#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) 134#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
135 135
@@ -165,6 +165,8 @@ static void unmap_cpu_to_node(int cpu)
165#endif 165#endif
166 166
167#ifdef CONFIG_X86_32 167#ifdef CONFIG_X86_32
168static int boot_cpu_logical_apicid;
169
168u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = 170u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
169 { [0 ... NR_CPUS-1] = BAD_APICID }; 171 { [0 ... NR_CPUS-1] = BAD_APICID };
170 172
@@ -210,7 +212,7 @@ static void __cpuinit smp_callin(void)
210 /* 212 /*
211 * (This works even if the APIC is not enabled.) 213 * (This works even if the APIC is not enabled.)
212 */ 214 */
213 phys_id = GET_APIC_ID(read_apic_id()); 215 phys_id = read_apic_id();
214 cpuid = smp_processor_id(); 216 cpuid = smp_processor_id();
215 if (cpu_isset(cpuid, cpu_callin_map)) { 217 if (cpu_isset(cpuid, cpu_callin_map)) {
216 panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, 218 panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
@@ -257,6 +259,7 @@ static void __cpuinit smp_callin(void)
257 end_local_APIC_setup(); 259 end_local_APIC_setup();
258 map_cpu_to_logical_apicid(); 260 map_cpu_to_logical_apicid();
259 261
262 notify_cpu_starting(cpuid);
260 /* 263 /*
261 * Get our bogomips. 264 * Get our bogomips.
262 * 265 *
@@ -279,6 +282,8 @@ static void __cpuinit smp_callin(void)
279 cpu_set(cpuid, cpu_callin_map); 282 cpu_set(cpuid, cpu_callin_map);
280} 283}
281 284
285static int __cpuinitdata unsafe_smp;
286
282/* 287/*
283 * Activate a secondary processor. 288 * Activate a secondary processor.
284 */ 289 */
@@ -331,14 +336,17 @@ static void __cpuinit start_secondary(void *unused)
331 * does not change while we are assigning vectors to cpus. Holding 336 * does not change while we are assigning vectors to cpus. Holding
332 * this lock ensures we don't half assign or remove an irq from a cpu. 337 * this lock ensures we don't half assign or remove an irq from a cpu.
333 */ 338 */
334 ipi_call_lock_irq(); 339 ipi_call_lock();
335 lock_vector_lock(); 340 lock_vector_lock();
336 __setup_vector_irq(smp_processor_id()); 341 __setup_vector_irq(smp_processor_id());
337 cpu_set(smp_processor_id(), cpu_online_map); 342 cpu_set(smp_processor_id(), cpu_online_map);
338 unlock_vector_lock(); 343 unlock_vector_lock();
339 ipi_call_unlock_irq(); 344 ipi_call_unlock();
340 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 345 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
341 346
347 /* enable local interrupts */
348 local_irq_enable();
349
342 setup_secondary_clock(); 350 setup_secondary_clock();
343 351
344 wmb(); 352 wmb();
@@ -391,7 +399,7 @@ static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
391 goto valid_k7; 399 goto valid_k7;
392 400
393 /* If we get here, not a certified SMP capable AMD system. */ 401 /* If we get here, not a certified SMP capable AMD system. */
394 add_taint(TAINT_UNSAFE_SMP); 402 unsafe_smp = 1;
395 } 403 }
396 404
397valid_k7: 405valid_k7:
@@ -408,12 +416,10 @@ static void __cpuinit smp_checks(void)
408 * Don't taint if we are running SMP kernel on a single non-MP 416 * Don't taint if we are running SMP kernel on a single non-MP
409 * approved Athlon 417 * approved Athlon
410 */ 418 */
411 if (tainted & TAINT_UNSAFE_SMP) { 419 if (unsafe_smp && num_online_cpus() > 1) {
412 if (num_online_cpus()) 420 printk(KERN_INFO "WARNING: This combination of AMD "
413 printk(KERN_INFO "WARNING: This combination of AMD " 421 "processors is not suitable for SMP.\n");
414 "processors is not suitable for SMP.\n"); 422 add_taint(TAINT_UNSAFE_SMP);
415 else
416 tainted &= ~TAINT_UNSAFE_SMP;
417 } 423 }
418} 424}
419 425
@@ -537,10 +543,10 @@ static inline void __inquire_remote_apic(int apicid)
537 int timeout; 543 int timeout;
538 u32 status; 544 u32 status;
539 545
540 printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); 546 printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);
541 547
542 for (i = 0; i < ARRAY_SIZE(regs); i++) { 548 for (i = 0; i < ARRAY_SIZE(regs); i++) {
543 printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]); 549 printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);
544 550
545 /* 551 /*
546 * Wait for idle. 552 * Wait for idle.
@@ -550,8 +556,7 @@ static inline void __inquire_remote_apic(int apicid)
550 printk(KERN_CONT 556 printk(KERN_CONT
551 "a previous APIC delivery may have failed\n"); 557 "a previous APIC delivery may have failed\n");
552 558
553 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); 559 apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
554 apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
555 560
556 timeout = 0; 561 timeout = 0;
557 do { 562 do {
@@ -583,11 +588,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
583 int maxlvt; 588 int maxlvt;
584 589
585 /* Target chip */ 590 /* Target chip */
586 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
587
588 /* Boot on the stack */ 591 /* Boot on the stack */
589 /* Kick the second */ 592 /* Kick the second */
590 apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL); 593 apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
591 594
592 pr_debug("Waiting for send to finish...\n"); 595 pr_debug("Waiting for send to finish...\n");
593 send_status = safe_apic_wait_icr_idle(); 596 send_status = safe_apic_wait_icr_idle();
@@ -596,10 +599,12 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
596 * Give the other CPU some time to accept the IPI. 599 * Give the other CPU some time to accept the IPI.
597 */ 600 */
598 udelay(200); 601 udelay(200);
599 maxlvt = lapic_get_maxlvt(); 602 if (APIC_INTEGRATED(apic_version[phys_apicid])) {
600 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 603 maxlvt = lapic_get_maxlvt();
601 apic_write(APIC_ESR, 0); 604 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
602 accept_status = (apic_read(APIC_ESR) & 0xEF); 605 apic_write(APIC_ESR, 0);
606 accept_status = (apic_read(APIC_ESR) & 0xEF);
607 }
603 pr_debug("NMI sent.\n"); 608 pr_debug("NMI sent.\n");
604 609
605 if (send_status) 610 if (send_status)
@@ -640,13 +645,11 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
640 /* 645 /*
641 * Turn INIT on target chip 646 * Turn INIT on target chip
642 */ 647 */
643 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
644
645 /* 648 /*
646 * Send IPI 649 * Send IPI
647 */ 650 */
648 apic_write(APIC_ICR, 651 apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
649 APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT); 652 phys_apicid);
650 653
651 pr_debug("Waiting for send to finish...\n"); 654 pr_debug("Waiting for send to finish...\n");
652 send_status = safe_apic_wait_icr_idle(); 655 send_status = safe_apic_wait_icr_idle();
@@ -656,10 +659,8 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
656 pr_debug("Deasserting INIT.\n"); 659 pr_debug("Deasserting INIT.\n");
657 660
658 /* Target chip */ 661 /* Target chip */
659 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
660
661 /* Send IPI */ 662 /* Send IPI */
662 apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); 663 apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
663 664
664 pr_debug("Waiting for send to finish...\n"); 665 pr_debug("Waiting for send to finish...\n");
665 send_status = safe_apic_wait_icr_idle(); 666 send_status = safe_apic_wait_icr_idle();
@@ -702,11 +703,10 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
702 */ 703 */
703 704
704 /* Target chip */ 705 /* Target chip */
705 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
706
707 /* Boot on the stack */ 706 /* Boot on the stack */
708 /* Kick the second */ 707 /* Kick the second */
709 apic_write(APIC_ICR, APIC_DM_STARTUP | (start_eip >> 12)); 708 apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
709 phys_apicid);
710 710
711 /* 711 /*
712 * Give the other CPU some time to accept the IPI. 712 * Give the other CPU some time to accept the IPI.
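Each apic_icr_write() call above folds the old two-step sequence, a write of the destination to ICR2 followed by the command to ICR, behind one helper that the x2APIC code can override. A sketch against a fake MMIO array (register offsets match <asm/apicdef.h>; the command word 0xC500 is LEVELTRIG|ASSERT|DM_INIT):

#include <stdint.h>
#include <stdio.h>

#define APIC_ICR                0x300
#define APIC_ICR2               0x310
#define SET_APIC_DEST_FIELD(x)  ((x) << 24)

static uint32_t fake_apic[0x400 / 4];   /* fake MMIO window */

static void apic_write_demo(uint32_t reg, uint32_t v)
{
    fake_apic[reg / 4] = v;
}

static void apic_icr_write_demo(uint32_t low, uint32_t apicid)
{
    apic_write_demo(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));    /* target */
    apic_write_demo(APIC_ICR, low);                             /* command */
}

int main(void)
{
    apic_icr_write_demo(0xC500, 1);     /* INIT, level-triggered, assert */
    printf("ICR2=%#x ICR=%#x\n",
           (unsigned)fake_apic[APIC_ICR2 / 4],
           (unsigned)fake_apic[APIC_ICR / 4]);
    return 0;
}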
@@ -874,7 +874,7 @@ do_rest:
874 start_ip = setup_trampoline(); 874 start_ip = setup_trampoline();
875 875
876 /* So we see what's up */ 876 /* So we see what's up */
877 printk(KERN_INFO "Booting processor %d/%d ip %lx\n", 877 printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
878 cpu, apicid, start_ip); 878 cpu, apicid, start_ip);
879 879
880 /* 880 /*
@@ -1175,10 +1175,17 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1175 * Setup boot CPU information 1175 * Setup boot CPU information
1176 */ 1176 */
1177 smp_store_cpu_info(0); /* Final full version of the data */ 1177 smp_store_cpu_info(0); /* Final full version of the data */
1178#ifdef CONFIG_X86_32
1178 boot_cpu_logical_apicid = logical_smp_processor_id(); 1179 boot_cpu_logical_apicid = logical_smp_processor_id();
1180#endif
1179 current_thread_info()->cpu = 0; /* needed? */ 1181 current_thread_info()->cpu = 0; /* needed? */
1180 set_cpu_sibling_map(0); 1182 set_cpu_sibling_map(0);
1181 1183
1184#ifdef CONFIG_X86_64
1185 enable_IR_x2apic();
1186 setup_apic_routing();
1187#endif
1188
1182 if (smp_sanity_check(max_cpus) < 0) { 1189 if (smp_sanity_check(max_cpus) < 0) {
1183 printk(KERN_INFO "SMP disabled\n"); 1190 printk(KERN_INFO "SMP disabled\n");
1184 disable_smp(); 1191 disable_smp();
@@ -1186,9 +1193,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1186 } 1193 }
1187 1194
1188 preempt_disable(); 1195 preempt_disable();
1189 if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) { 1196 if (read_apic_id() != boot_cpu_physical_apicid) {
1190 panic("Boot APIC ID in local APIC unexpected (%d vs %d)", 1197 panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
1191 GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid); 1198 read_apic_id(), boot_cpu_physical_apicid);
1192 /* Or can we switch back to PIC here? */ 1199 /* Or can we switch back to PIC here? */
1193 } 1200 }
1194 preempt_enable(); 1201 preempt_enable();
@@ -1254,39 +1261,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
1254 check_nmi_watchdog(); 1261 check_nmi_watchdog();
1255} 1262}
1256 1263
1257#ifdef CONFIG_HOTPLUG_CPU
1258
1259static void remove_siblinginfo(int cpu)
1260{
1261 int sibling;
1262 struct cpuinfo_x86 *c = &cpu_data(cpu);
1263
1264 for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
1265 cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
1266 /*
1267 * last thread sibling in this cpu core going down
1268 */
1269 if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
1270 cpu_data(sibling).booted_cores--;
1271 }
1272
1273 for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
1274 cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
1275 cpus_clear(per_cpu(cpu_sibling_map, cpu));
1276 cpus_clear(per_cpu(cpu_core_map, cpu));
1277 c->phys_proc_id = 0;
1278 c->cpu_core_id = 0;
1279 cpu_clear(cpu, cpu_sibling_setup_map);
1280}
1281
1282static int additional_cpus __initdata = -1;
1283
1284static __init int setup_additional_cpus(char *s)
1285{
1286 return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
1287}
1288early_param("additional_cpus", setup_additional_cpus);
1289
1290/* 1264/*
1291 * cpu_possible_map should be static, it cannot change as cpu's 1265 * cpu_possible_map should be static, it cannot change as cpu's
1292 * are onlined, or offlined. The reason is per-cpu data-structures 1266 * are onlined, or offlined. The reason is per-cpu data-structures
@@ -1306,24 +1280,13 @@ early_param("additional_cpus", setup_additional_cpus);
1306 */ 1280 */
1307__init void prefill_possible_map(void) 1281__init void prefill_possible_map(void)
1308{ 1282{
1309 int i; 1283 int i, possible;
1310 int possible;
1311 1284
1312 /* no processor from mptable or madt */ 1285 /* no processor from mptable or madt */
1313 if (!num_processors) 1286 if (!num_processors)
1314 num_processors = 1; 1287 num_processors = 1;
1315 1288
1316#ifdef CONFIG_HOTPLUG_CPU 1289 possible = num_processors + disabled_cpus;
1317 if (additional_cpus == -1) {
1318 if (disabled_cpus > 0)
1319 additional_cpus = disabled_cpus;
1320 else
1321 additional_cpus = 0;
1322 }
1323#else
1324 additional_cpus = 0;
1325#endif
1326 possible = num_processors + additional_cpus;
1327 if (possible > NR_CPUS) 1290 if (possible > NR_CPUS)
1328 possible = NR_CPUS; 1291 possible = NR_CPUS;
1329 1292
@@ -1336,6 +1299,31 @@ __init void prefill_possible_map(void)
1336 nr_cpu_ids = possible; 1299 nr_cpu_ids = possible;
1337} 1300}
1338 1301
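prefill_possible_map() now sizes the possible map as the enumerated processors plus the firmware-disabled ones, clamped to NR_CPUS, instead of the removed additional_cpus= parameter. The arithmetic in isolation (NR_CPUS and the counts are made-up values):

#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
    int num_processors = 0;     /* nothing found in MADT/mptable */
    int disabled_cpus = 3;      /* firmware-disabled entries */
    int possible;

    if (!num_processors)
        num_processors = 1;     /* at least the boot CPU */

    possible = num_processors + disabled_cpus;
    if (possible > NR_CPUS)
        possible = NR_CPUS;

    printf("possible=%d\n", possible);
    return 0;
}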
1302#ifdef CONFIG_HOTPLUG_CPU
1303
1304static void remove_siblinginfo(int cpu)
1305{
1306 int sibling;
1307 struct cpuinfo_x86 *c = &cpu_data(cpu);
1308
1309 for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
1310 cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
1311 /*
1312 * last thread sibling in this cpu core going down
1313 */
1314 if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
1315 cpu_data(sibling).booted_cores--;
1316 }
1317
1318 for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
1319 cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
1320 cpus_clear(per_cpu(cpu_sibling_map, cpu));
1321 cpus_clear(per_cpu(cpu_core_map, cpu));
1322 c->phys_proc_id = 0;
1323 c->cpu_core_id = 0;
1324 cpu_clear(cpu, cpu_sibling_setup_map);
1325}
1326
1339static void __ref remove_cpu_from_maps(int cpu) 1327static void __ref remove_cpu_from_maps(int cpu)
1340{ 1328{
1341 cpu_clear(cpu, cpu_online_map); 1329 cpu_clear(cpu, cpu_online_map);
@@ -1346,25 +1334,9 @@ static void __ref remove_cpu_from_maps(int cpu)
1346 numa_remove_cpu(cpu); 1334 numa_remove_cpu(cpu);
1347} 1335}
1348 1336
1349int __cpu_disable(void) 1337void cpu_disable_common(void)
1350{ 1338{
1351 int cpu = smp_processor_id(); 1339 int cpu = smp_processor_id();
1352
1353 /*
1354 * Perhaps use cpufreq to drop frequency, but that could go
1355 * into generic code.
1356 *
1357 * We won't take down the boot processor on i386 due to some
1358 * interrupts only being able to be serviced by the BSP.
1359 * Especially so if we're not using an IOAPIC -zwane
1360 */
1361 if (cpu == 0)
1362 return -EBUSY;
1363
1364 if (nmi_watchdog == NMI_LOCAL_APIC)
1365 stop_apic_nmi_watchdog(NULL);
1366 clear_local_APIC();
1367
1368 /* 1340 /*
1369 * HACK: 1341 * HACK:
1370 * Allow any queued timer interrupts to get serviced 1342 * Allow any queued timer interrupts to get serviced
@@ -1382,10 +1354,32 @@ int __cpu_disable(void)
1382 remove_cpu_from_maps(cpu); 1354 remove_cpu_from_maps(cpu);
1383 unlock_vector_lock(); 1355 unlock_vector_lock();
1384 fixup_irqs(cpu_online_map); 1356 fixup_irqs(cpu_online_map);
1357}
1358
1359int native_cpu_disable(void)
1360{
1361 int cpu = smp_processor_id();
1362
1363 /*
1364 * Perhaps use cpufreq to drop frequency, but that could go
1365 * into generic code.
1366 *
1367 * We won't take down the boot processor on i386 due to some
1368 * interrupts only being able to be serviced by the BSP.
1369 * Especially so if we're not using an IOAPIC -zwane
1370 */
1371 if (cpu == 0)
1372 return -EBUSY;
1373
1374 if (nmi_watchdog == NMI_LOCAL_APIC)
1375 stop_apic_nmi_watchdog(NULL);
1376 clear_local_APIC();
1377
1378 cpu_disable_common();
1385 return 0; 1379 return 0;
1386} 1380}
1387 1381
1388void __cpu_die(unsigned int cpu) 1382void native_cpu_die(unsigned int cpu)
1389{ 1383{
1390 /* We don't do anything here: idle task is faking death itself. */ 1384 /* We don't do anything here: idle task is faking death itself. */
1391 unsigned int i; 1385 unsigned int i;
@@ -1402,15 +1396,45 @@ void __cpu_die(unsigned int cpu)
1402 } 1396 }
1403 printk(KERN_ERR "CPU %u didn't die...\n", cpu); 1397 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1404} 1398}
1399
1400void play_dead_common(void)
1401{
1402 idle_task_exit();
1403 reset_lazy_tlbstate();
1404 irq_ctx_exit(raw_smp_processor_id());
1405 c1e_remove_cpu(raw_smp_processor_id());
1406
1407 mb();
1408 /* Ack it */
1409 __get_cpu_var(cpu_state) = CPU_DEAD;
1410
1411 /*
1412 * With physical CPU hotplug, we should halt the cpu
1413 */
1414 local_irq_disable();
1415}
1416
1417void native_play_dead(void)
1418{
1419 play_dead_common();
1420 wbinvd_halt();
1421}
1422
1405#else /* ... !CONFIG_HOTPLUG_CPU */ 1423#else /* ... !CONFIG_HOTPLUG_CPU */
1406int __cpu_disable(void) 1424int native_cpu_disable(void)
1407{ 1425{
1408 return -ENOSYS; 1426 return -ENOSYS;
1409} 1427}
1410 1428
1411void __cpu_die(unsigned int cpu) 1429void native_cpu_die(unsigned int cpu)
1412{ 1430{
1413 /* We said "no" in __cpu_disable */ 1431 /* We said "no" in __cpu_disable */
1414 BUG(); 1432 BUG();
1415} 1433}
1434
1435void native_play_dead(void)
1436{
1437 BUG();
1438}
1439
1416#endif 1440#endif
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
index d67ce5f044ba..7b987852e876 100644
--- a/arch/x86/kernel/summit_32.c
+++ b/arch/x86/kernel/summit_32.c
@@ -30,7 +30,7 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/bios_ebda.h> 32#include <asm/bios_ebda.h>
33#include <asm/mach-summit/mach_mpparse.h> 33#include <asm/summit/mpparse.h>
34 34
35static struct rio_table_hdr *rio_table_hdr __initdata; 35static struct rio_table_hdr *rio_table_hdr __initdata;
36static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; 36static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index 7066cb855a60..1884a8d12bfa 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -22,6 +22,8 @@
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/unistd.h> 23#include <linux/unistd.h>
24 24
25#include <asm/syscalls.h>
26
25asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, 27asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
26 unsigned long prot, unsigned long flags, 28 unsigned long prot, unsigned long flags,
27 unsigned long fd, unsigned long pgoff) 29 unsigned long fd, unsigned long pgoff)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 3b360ef33817..6bc211accf08 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -13,15 +13,17 @@
13#include <linux/utsname.h> 13#include <linux/utsname.h>
14#include <linux/personality.h> 14#include <linux/personality.h>
15#include <linux/random.h> 15#include <linux/random.h>
16#include <linux/uaccess.h>
16 17
17#include <asm/uaccess.h>
18#include <asm/ia32.h> 18#include <asm/ia32.h>
19#include <asm/syscalls.h>
19 20
20asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, 21asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
21 unsigned long fd, unsigned long off) 22 unsigned long prot, unsigned long flags,
23 unsigned long fd, unsigned long off)
22{ 24{
23 long error; 25 long error;
24 struct file * file; 26 struct file *file;
25 27
26 error = -EINVAL; 28 error = -EINVAL;
27 if (off & ~PAGE_MASK) 29 if (off & ~PAGE_MASK)
@@ -56,9 +58,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
56 unmapped base down for this case. This can give 58 unmapped base down for this case. This can give
57 conflicts with the heap, but we assume that glibc 59 conflicts with the heap, but we assume that glibc
58 malloc knows how to fall back to mmap. Give it 1GB 60 malloc knows how to fall back to mmap. Give it 1GB
59 of playground for now. -AK */ 61 of playground for now. -AK */
60 *begin = 0x40000000; 62 *begin = 0x40000000;
61 *end = 0x80000000; 63 *end = 0x80000000;
62 if (current->flags & PF_RANDOMIZE) { 64 if (current->flags & PF_RANDOMIZE) {
63 new_begin = randomize_range(*begin, *begin + 0x02000000, 0); 65 new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
64 if (new_begin) 66 if (new_begin)
@@ -66,9 +68,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
66 } 68 }
67 } else { 69 } else {
68 *begin = TASK_UNMAPPED_BASE; 70 *begin = TASK_UNMAPPED_BASE;
69 *end = TASK_SIZE; 71 *end = TASK_SIZE;
70 } 72 }
71} 73}
72 74
73unsigned long 75unsigned long
74arch_get_unmapped_area(struct file *filp, unsigned long addr, 76arch_get_unmapped_area(struct file *filp, unsigned long addr,
@@ -78,11 +80,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78 struct vm_area_struct *vma; 80 struct vm_area_struct *vma;
79 unsigned long start_addr; 81 unsigned long start_addr;
80 unsigned long begin, end; 82 unsigned long begin, end;
81 83
82 if (flags & MAP_FIXED) 84 if (flags & MAP_FIXED)
83 return addr; 85 return addr;
84 86
85 find_start_end(flags, &begin, &end); 87 find_start_end(flags, &begin, &end);
86 88
87 if (len > end) 89 if (len > end)
88 return -ENOMEM; 90 return -ENOMEM;
@@ -96,12 +98,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
96 } 98 }
97 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) 99 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
98 && len <= mm->cached_hole_size) { 100 && len <= mm->cached_hole_size) {
99 mm->cached_hole_size = 0; 101 mm->cached_hole_size = 0;
100 mm->free_area_cache = begin; 102 mm->free_area_cache = begin;
101 } 103 }
102 addr = mm->free_area_cache; 104 addr = mm->free_area_cache;
103 if (addr < begin) 105 if (addr < begin)
104 addr = begin; 106 addr = begin;
105 start_addr = addr; 107 start_addr = addr;
106 108
107full_search: 109full_search:
@@ -127,7 +129,7 @@ full_search:
127 return addr; 129 return addr;
128 } 130 }
129 if (addr + mm->cached_hole_size < vma->vm_start) 131 if (addr + mm->cached_hole_size < vma->vm_start)
130 mm->cached_hole_size = vma->vm_start - addr; 132 mm->cached_hole_size = vma->vm_start - addr;
131 133
132 addr = vma->vm_end; 134 addr = vma->vm_end;
133 } 135 }
@@ -177,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
177 vma = find_vma(mm, addr-len); 179 vma = find_vma(mm, addr-len);
178 if (!vma || addr <= vma->vm_start) 180 if (!vma || addr <= vma->vm_start)
179 /* remember the address as a hint for next time */ 181 /* remember the address as a hint for next time */
180 return (mm->free_area_cache = addr-len); 182 return mm->free_area_cache = addr-len;
181 } 183 }
182 184
183 if (mm->mmap_base < len) 185 if (mm->mmap_base < len)
@@ -194,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
194 vma = find_vma(mm, addr); 196 vma = find_vma(mm, addr);
195 if (!vma || addr+len <= vma->vm_start) 197 if (!vma || addr+len <= vma->vm_start)
196 /* remember the address as a hint for next time */ 198 /* remember the address as a hint for next time */
197 return (mm->free_area_cache = addr); 199 return mm->free_area_cache = addr;
198 200
199 /* remember the largest hole we saw so far */ 201 /* remember the largest hole we saw so far */
200 if (addr + mm->cached_hole_size < vma->vm_start) 202 if (addr + mm->cached_hole_size < vma->vm_start)
@@ -224,13 +226,13 @@ bottomup:
224} 226}
225 227
226 228
227asmlinkage long sys_uname(struct new_utsname __user * name) 229asmlinkage long sys_uname(struct new_utsname __user *name)
228{ 230{
229 int err; 231 int err;
230 down_read(&uts_sem); 232 down_read(&uts_sem);
231 err = copy_to_user(name, utsname(), sizeof (*name)); 233 err = copy_to_user(name, utsname(), sizeof(*name));
232 up_read(&uts_sem); 234 up_read(&uts_sem);
233 if (personality(current->personality) == PER_LINUX32) 235 if (personality(current->personality) == PER_LINUX32)
234 err |= copy_to_user(&name->machine, "i686", 5); 236 err |= copy_to_user(&name->machine, "i686", 5);
235 return err ? -EFAULT : 0; 237 return err ? -EFAULT : 0;
236} 238}
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 170d43c17487..3d1be4f0fac5 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -8,12 +8,12 @@
8#define __NO_STUBS 8#define __NO_STUBS
9 9
10#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; 10#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
11#undef _ASM_X86_64_UNISTD_H_ 11#undef ASM_X86__UNISTD_64_H
12#include <asm/unistd_64.h> 12#include <asm/unistd_64.h>
13 13
14#undef __SYSCALL 14#undef __SYSCALL
15#define __SYSCALL(nr, sym) [nr] = sym, 15#define __SYSCALL(nr, sym) [nr] = sym,
16#undef _ASM_X86_64_UNISTD_H_ 16#undef ASM_X86__UNISTD_64_H
17 17
18typedef void (*sys_call_ptr_t)(void); 18typedef void (*sys_call_ptr_t)(void);
19 19
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index ffe3c664afc0..77b400f06ea2 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -36,6 +36,7 @@
36#include <asm/arch_hooks.h> 36#include <asm/arch_hooks.h>
37#include <asm/hpet.h> 37#include <asm/hpet.h>
38#include <asm/time.h> 38#include <asm/time.h>
39#include <asm/timer.h>
39 40
40#include "do_timer.h" 41#include "do_timer.h"
41 42
@@ -46,10 +47,9 @@ unsigned long profile_pc(struct pt_regs *regs)
46 unsigned long pc = instruction_pointer(regs); 47 unsigned long pc = instruction_pointer(regs);
47 48
48#ifdef CONFIG_SMP 49#ifdef CONFIG_SMP
49 if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->cs) && 50 if (!user_mode_vm(regs) && in_lock_functions(pc)) {
50 in_lock_functions(pc)) {
51#ifdef CONFIG_FRAME_POINTER 51#ifdef CONFIG_FRAME_POINTER
52 return *(unsigned long *)(regs->bp + 4); 52 return *(unsigned long *)(regs->bp + sizeof(long));
53#else 53#else
54 unsigned long *sp = (unsigned long *)&regs->sp; 54 unsigned long *sp = (unsigned long *)&regs->sp;
55 55
@@ -94,6 +94,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
94 94
95 do_timer_interrupt_hook(); 95 do_timer_interrupt_hook();
96 96
97#ifdef CONFIG_MCA
97 if (MCA_bus) { 98 if (MCA_bus) {
98 /* The PS/2 uses level-triggered interrupts. You can't 99 /* The PS/2 uses level-triggered interrupts. You can't
99 turn them off, nor would you want to (any attempt to 100 turn them off, nor would you want to (any attempt to
@@ -107,6 +108,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
107 u8 irq_v = inb_p( 0x61 ); /* read the current state */ 108 u8 irq_v = inb_p( 0x61 ); /* read the current state */
108 outb_p( irq_v|0x80, 0x61 ); /* reset the IRQ */ 109 outb_p( irq_v|0x80, 0x61 ); /* reset the IRQ */
109 } 110 }
111#endif
110 112
111 return IRQ_HANDLED; 113 return IRQ_HANDLED;
112} 114}
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index e3d49c553af2..cb19d650c216 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -16,6 +16,7 @@
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/time.h> 18#include <linux/time.h>
19#include <linux/mca.h>
19 20
20#include <asm/i8253.h> 21#include <asm/i8253.h>
21#include <asm/hpet.h> 22#include <asm/hpet.h>
@@ -33,23 +34,34 @@ unsigned long profile_pc(struct pt_regs *regs)
33 /* Assume the lock function has either no stack frame or a copy 34 /* Assume the lock function has either no stack frame or a copy
34 of flags from PUSHF 35 of flags from PUSHF
35 Eflags always has bits 22 and up cleared unlike kernel addresses. */ 36 Eflags always has bits 22 and up cleared unlike kernel addresses. */
36 if (!user_mode(regs) && in_lock_functions(pc)) { 37 if (!user_mode_vm(regs) && in_lock_functions(pc)) {
38#ifdef CONFIG_FRAME_POINTER
39 return *(unsigned long *)(regs->bp + sizeof(long));
40#else
37 unsigned long *sp = (unsigned long *)regs->sp; 41 unsigned long *sp = (unsigned long *)regs->sp;
38 if (sp[0] >> 22) 42 if (sp[0] >> 22)
39 return sp[0]; 43 return sp[0];
40 if (sp[1] >> 22) 44 if (sp[1] >> 22)
41 return sp[1]; 45 return sp[1];
46#endif
42 } 47 }
43 return pc; 48 return pc;
44} 49}
45EXPORT_SYMBOL(profile_pc); 50EXPORT_SYMBOL(profile_pc);
46 51
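Both profile_pc() variants now share the frame-pointer fast path: with CONFIG_FRAME_POINTER the caller's return address sits one word above the saved bp, i.e. at regs->bp + sizeof(long). A sketch of that frame layout (the address is made up):

#include <stdio.h>

/* on x86, the word above the saved frame pointer is the return address */
struct frame {
    struct frame   *next_bp;    /* saved caller bp */
    unsigned long   ret;        /* at bp + sizeof(long) */
};

static unsigned long caller_pc(const struct frame *bp)
{
    return bp->ret; /* i.e. *(unsigned long *)(regs->bp + sizeof(long)) */
}

int main(void)
{
    struct frame f = { NULL, 0xc1000000UL };    /* made-up kernel pc */

    printf("pc=%#lx\n", caller_pc(&f));
    return 0;
}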
47static irqreturn_t timer_event_interrupt(int irq, void *dev_id) 52irqreturn_t timer_interrupt(int irq, void *dev_id)
48{ 53{
49 add_pda(irq0_irqs, 1); 54 add_pda(irq0_irqs, 1);
50 55
51 global_clock_event->event_handler(global_clock_event); 56 global_clock_event->event_handler(global_clock_event);
52 57
58#ifdef CONFIG_MCA
59 if (MCA_bus) {
60 u8 irq_v = inb_p(0x61); /* read the current state */
61 outb_p(irq_v|0x80, 0x61); /* reset the IRQ */
62 }
63#endif
64
53 return IRQ_HANDLED; 65 return IRQ_HANDLED;
54} 66}
55 67
@@ -100,7 +112,7 @@ unsigned long __init calibrate_cpu(void)
100} 112}
101 113
102static struct irqaction irq0 = { 114static struct irqaction irq0 = {
103 .handler = timer_event_interrupt, 115 .handler = timer_interrupt,
104 .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING, 116 .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
105 .mask = CPU_MASK_NONE, 117 .mask = CPU_MASK_NONE,
106 .name = "timer" 118 .name = "timer"
@@ -111,16 +123,13 @@ void __init hpet_time_init(void)
111 if (!hpet_enable()) 123 if (!hpet_enable())
112 setup_pit_timer(); 124 setup_pit_timer();
113 125
126 irq0.mask = cpumask_of_cpu(0);
114 setup_irq(0, &irq0); 127 setup_irq(0, &irq0);
115} 128}
116 129
117void __init time_init(void) 130void __init time_init(void)
118{ 131{
119 tsc_init(); 132 tsc_init();
120 if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
121 vgetcpu_mode = VGETCPU_RDTSCP;
122 else
123 vgetcpu_mode = VGETCPU_LSL;
124 133
125 late_time_init = choose_time_init(); 134 late_time_init = choose_time_init();
126} 135}
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index fec1ecedc9b7..e00534b33534 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -241,3 +241,11 @@ void flush_tlb_all(void)
241 on_each_cpu(do_flush_tlb_all, NULL, 1); 241 on_each_cpu(do_flush_tlb_all, NULL, 1);
242} 242}
243 243
244void reset_lazy_tlbstate(void)
245{
246 int cpu = raw_smp_processor_id();
247
248 per_cpu(cpu_tlbstate, cpu).state = 0;
249 per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
250}
251
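reset_lazy_tlbstate(), called from play_dead_common() above, points the dying CPU's lazy-TLB bookkeeping back at init_mm so it no longer references a borrowed mm. A sketch with simplified stand-in types (not the real per-cpu machinery):

#include <stdio.h>

struct mm_demo { const char *name; };
static struct mm_demo init_mm_demo = { "init_mm" };

struct tlb_state_demo {
    int             state;      /* 0 = not in lazy mode */
    struct mm_demo *active_mm;
};
static struct tlb_state_demo cpu_tlbstate_demo;

static void reset_lazy_tlbstate_demo(void)
{
    cpu_tlbstate_demo.state = 0;                    /* leave lazy mode */
    cpu_tlbstate_demo.active_mm = &init_mm_demo;    /* drop borrowed mm */
}

int main(void)
{
    reset_lazy_tlbstate_demo();
    printf("active_mm=%s\n", cpu_tlbstate_demo.active_mm->name);
    return 0;
}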
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index ab6bf375a307..6bb7b8579e70 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -10,6 +10,7 @@
10#include <asm/ldt.h> 10#include <asm/ldt.h>
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <asm/proto.h> 12#include <asm/proto.h>
13#include <asm/syscalls.h>
13 14
14#include "tls.h" 15#include "tls.h"
15 16
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps.c
index 03df8e45e5a1..e062974cce34 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps.c
@@ -7,13 +7,11 @@
7 */ 7 */
8 8
9/* 9/*
10 * 'Traps.c' handles hardware traps and faults after we have saved some 10 * Handle hardware traps and faults.
11 * state in 'asm.s'.
12 */ 11 */
13#include <linux/interrupt.h> 12#include <linux/interrupt.h>
14#include <linux/kallsyms.h> 13#include <linux/kallsyms.h>
15#include <linux/spinlock.h> 14#include <linux/spinlock.h>
16#include <linux/highmem.h>
17#include <linux/kprobes.h> 15#include <linux/kprobes.h>
18#include <linux/uaccess.h> 16#include <linux/uaccess.h>
19#include <linux/utsname.h> 17#include <linux/utsname.h>
@@ -32,6 +30,8 @@
32#include <linux/bug.h> 30#include <linux/bug.h>
33#include <linux/nmi.h> 31#include <linux/nmi.h>
34#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/smp.h>
34#include <linux/io.h>
35 35
36#ifdef CONFIG_EISA 36#ifdef CONFIG_EISA
37#include <linux/ioport.h> 37#include <linux/ioport.h>
@@ -46,21 +46,31 @@
46#include <linux/edac.h> 46#include <linux/edac.h>
47#endif 47#endif
48 48
49#include <asm/arch_hooks.h>
50#include <asm/stacktrace.h> 49#include <asm/stacktrace.h>
51#include <asm/processor.h> 50#include <asm/processor.h>
52#include <asm/debugreg.h> 51#include <asm/debugreg.h>
53#include <asm/atomic.h> 52#include <asm/atomic.h>
54#include <asm/system.h> 53#include <asm/system.h>
55#include <asm/unwind.h> 54#include <asm/unwind.h>
55#include <asm/traps.h>
56#include <asm/desc.h> 56#include <asm/desc.h>
57#include <asm/i387.h> 57#include <asm/i387.h>
58
59#include <mach_traps.h>
60
61#ifdef CONFIG_X86_64
62#include <asm/pgalloc.h>
63#include <asm/proto.h>
64#include <asm/pda.h>
65#else
66#include <asm/processor-flags.h>
67#include <asm/arch_hooks.h>
58#include <asm/nmi.h> 68#include <asm/nmi.h>
59#include <asm/smp.h> 69#include <asm/smp.h>
60#include <asm/io.h> 70#include <asm/io.h>
61#include <asm/traps.h> 71#include <asm/traps.h>
62 72
63#include "mach_traps.h" 73#include "cpu/mcheck/mce.h"
64 74
65DECLARE_BITMAP(used_vectors, NR_VECTORS); 75DECLARE_BITMAP(used_vectors, NR_VECTORS);
66EXPORT_SYMBOL_GPL(used_vectors); 76EXPORT_SYMBOL_GPL(used_vectors);
@@ -77,418 +87,104 @@ char ignore_fpu_irq;
77 */ 87 */
78gate_desc idt_table[256] 88gate_desc idt_table[256]
79 __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; 89 __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
80
81int panic_on_unrecovered_nmi;
82int kstack_depth_to_print = 24;
83static unsigned int code_bytes = 64;
84static int ignore_nmis;
85static int die_counter;
86
87void printk_address(unsigned long address, int reliable)
88{
89#ifdef CONFIG_KALLSYMS
90 unsigned long offset = 0;
91 unsigned long symsize;
92 const char *symname;
93 char *modname;
94 char *delim = ":";
95 char namebuf[KSYM_NAME_LEN];
96 char reliab[4] = "";
97
98 symname = kallsyms_lookup(address, &symsize, &offset,
99 &modname, namebuf);
100 if (!symname) {
101 printk(" [<%08lx>]\n", address);
102 return;
103 }
104 if (!reliable)
105 strcpy(reliab, "? ");
106
107 if (!modname)
108 modname = delim = "";
109 printk(" [<%08lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
110 address, reliab, delim, modname, delim, symname, offset, symsize);
111#else
112 printk(" [<%08lx>]\n", address);
113#endif 90#endif
114}
115
116static inline int valid_stack_ptr(struct thread_info *tinfo,
117 void *p, unsigned int size)
118{
119 void *t = tinfo;
120 return p > t && p <= t + THREAD_SIZE - size;
121}
122
123/* The form of the top of the frame on the stack */
124struct stack_frame {
125 struct stack_frame *next_frame;
126 unsigned long return_address;
127};
128
129static inline unsigned long
130print_context_stack(struct thread_info *tinfo,
131 unsigned long *stack, unsigned long bp,
132 const struct stacktrace_ops *ops, void *data)
133{
134 struct stack_frame *frame = (struct stack_frame *)bp;
135
136 while (valid_stack_ptr(tinfo, stack, sizeof(*stack))) {
137 unsigned long addr;
138
139 addr = *stack;
140 if (__kernel_text_address(addr)) {
141 if ((unsigned long) stack == bp + 4) {
142 ops->address(data, addr, 1);
143 frame = frame->next_frame;
144 bp = (unsigned long) frame;
145 } else {
146 ops->address(data, addr, bp == 0);
147 }
148 }
149 stack++;
150 }
151 return bp;
152}
153
154void dump_trace(struct task_struct *task, struct pt_regs *regs,
155 unsigned long *stack, unsigned long bp,
156 const struct stacktrace_ops *ops, void *data)
157{
158 if (!task)
159 task = current;
160
161 if (!stack) {
162 unsigned long dummy;
163 stack = &dummy;
164 if (task != current)
165 stack = (unsigned long *)task->thread.sp;
166 }
167
168#ifdef CONFIG_FRAME_POINTER
169 if (!bp) {
170 if (task == current) {
171 /* Grab bp right from our regs */
172 asm("movl %%ebp, %0" : "=r" (bp) :);
173 } else {
174 /* bp is the last reg pushed by switch_to */
175 bp = *(unsigned long *) task->thread.sp;
176 }
177 }
178#endif
179
180 for (;;) {
181 struct thread_info *context;
182
183 context = (struct thread_info *)
184 ((unsigned long)stack & (~(THREAD_SIZE - 1)));
185 bp = print_context_stack(context, stack, bp, ops, data);
186 /*
187 * Should be after the line below, but somewhere
188 * in early boot context comes out corrupted and we
189 * can't reference it:
190 */
191 if (ops->stack(data, "IRQ") < 0)
192 break;
193 stack = (unsigned long *)context->previous_esp;
194 if (!stack)
195 break;
196 touch_nmi_watchdog();
197 }
198}
199EXPORT_SYMBOL(dump_trace);
200
201static void
202print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
203{
204 printk(data);
205 print_symbol(msg, symbol);
206 printk("\n");
207}
208
209static void print_trace_warning(void *data, char *msg)
210{
211 printk("%s%s\n", (char *)data, msg);
212}
213 91
214static int print_trace_stack(void *data, char *name) 92static int ignore_nmis;
215{
216 return 0;
217}
218
219/*
220 * Print one address/symbol entries per line.
221 */
222static void print_trace_address(void *data, unsigned long addr, int reliable)
223{
224 printk("%s [<%08lx>] ", (char *)data, addr);
225 if (!reliable)
226 printk("? ");
227 print_symbol("%s\n", addr);
228 touch_nmi_watchdog();
229}
230
231static const struct stacktrace_ops print_trace_ops = {
232 .warning = print_trace_warning,
233 .warning_symbol = print_trace_warning_symbol,
234 .stack = print_trace_stack,
235 .address = print_trace_address,
236};
237 93
238static void
239show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
240 unsigned long *stack, unsigned long bp, char *log_lvl)
241{
242 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
243 printk("%s =======================\n", log_lvl);
244}
245
246void show_trace(struct task_struct *task, struct pt_regs *regs,
247 unsigned long *stack, unsigned long bp)
248{
249 show_trace_log_lvl(task, regs, stack, bp, "");
250}
 94static inline void conditional_sti(struct pt_regs *regs)
 95{
 96 if (regs->flags & X86_EFLAGS_IF)
 97 local_irq_enable();
 98}
 99
100static inline void preempt_conditional_sti(struct pt_regs *regs)
101{
102 inc_preempt_count();
103 if (regs->flags & X86_EFLAGS_IF)
104 local_irq_enable();
105}
251 106
252static void
253show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
254 unsigned long *sp, unsigned long bp, char *log_lvl)
255{
256 unsigned long *stack;
257 int i;
258
259 if (sp == NULL) {
260 if (task)
261 sp = (unsigned long *)task->thread.sp;
262 else
263 sp = (unsigned long *)&sp;
264 }
265
266 stack = sp;
267 for (i = 0; i < kstack_depth_to_print; i++) {
268 if (kstack_end(stack))
269 break;
270 if (i && ((i % 8) == 0))
271 printk("\n%s ", log_lvl);
272 printk("%08lx ", *stack++);
273 }
274 printk("\n%sCall Trace:\n", log_lvl);
275
276 show_trace_log_lvl(task, regs, sp, bp, log_lvl);
277}
107static inline void preempt_conditional_cli(struct pt_regs *regs)
108{
109 if (regs->flags & X86_EFLAGS_IF)
110 local_irq_disable();
111 dec_preempt_count();
112}
278 113
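The three helpers above follow one rule: only re-enable interrupts if the interrupted context had them enabled, and hold preemption across any section that runs on an exception stack. A hedged usage sketch (do_example_trap is a made-up handler for illustration, not part of this patch):

dotraplinkage void do_example_trap(struct pt_regs *regs, long error_code)
{
	preempt_conditional_sti(regs);	/* IF was set? then sti; preempt off */

	/* ... handle the trap; no scheduling allowed in here ... */

	preempt_conditional_cli(regs);	/* undo both, in reverse order */
}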
279void show_stack(struct task_struct *task, unsigned long *sp)
280{
281 printk(" ");
282 show_stack_log_lvl(task, NULL, sp, 0, "");
283}
114#ifdef CONFIG_X86_32
115static inline void
116die_if_kernel(const char *str, struct pt_regs *regs, long err)
117{
118 if (!user_mode_vm(regs))
119 die(str, regs, err);
120}
284 121
122/*
123 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
124 * invalid offset set (the LAZY one) and the faulting thread has
125 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS,
126 * we set the offset field correctly and return 1.
127 */
128static int lazy_iobitmap_copy(void)
129{
130 struct thread_struct *thread;
131 struct tss_struct *tss;
132 int cpu;
133
134 cpu = get_cpu();
135 tss = &per_cpu(init_tss, cpu);
136 thread = &current->thread;
137
138 if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
139 thread->io_bitmap_ptr) {
140 memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
141 thread->io_bitmap_max);
142 /*
143 * If the previously set map was extending to higher ports
144 * than the current one, pad extra space with 0xff (no access).
145 */
146 if (thread->io_bitmap_max < tss->io_bitmap_max) {
147 memset((char *) tss->io_bitmap +
148 thread->io_bitmap_max, 0xff,
149 tss->io_bitmap_max - thread->io_bitmap_max);
150 }
151 tss->io_bitmap_max = thread->io_bitmap_max;
152 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
153 tss->io_bitmap_owner = thread;
154 put_cpu();
285/*
286 * The architecture-independent dump_stack generator
287 */
288void dump_stack(void)
289{
290 unsigned long bp = 0;
291 unsigned long stack;
292
293#ifdef CONFIG_FRAME_POINTER
294 if (!bp)
295 asm("movl %%ebp, %0" : "=r" (bp):);
296#endif
297
298 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
299 current->pid, current->comm, print_tainted(),
300 init_utsname()->release,
301 (int)strcspn(init_utsname()->version, " "),
302 init_utsname()->version);
303
304 show_trace(current, NULL, &stack, bp);
305}
306
307EXPORT_SYMBOL(dump_stack);
308
309void show_registers(struct pt_regs *regs)
310{
311 int i;
312
313 print_modules();
314 __show_registers(regs, 0);
315
316 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
317 TASK_COMM_LEN, current->comm, task_pid_nr(current),
318 current_thread_info(), current, task_thread_info(current));
319 /*
320 * When in-kernel, we also print out the stack and code at the
321 * time of the fault..
322 */
323 if (!user_mode_vm(regs)) {
324 unsigned int code_prologue = code_bytes * 43 / 64;
325 unsigned int code_len = code_bytes;
326 unsigned char c;
327 u8 *ip;
328
329 printk("\n" KERN_EMERG "Stack: ");
330 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
331
332 printk(KERN_EMERG "Code: ");
333
334 ip = (u8 *)regs->ip - code_prologue;
335 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
336 /* try starting at EIP */
337 ip = (u8 *)regs->ip;
338 code_len = code_len - code_prologue + 1;
339 }
340 for (i = 0; i < code_len; i++, ip++) {
341 if (ip < (u8 *)PAGE_OFFSET ||
342 probe_kernel_address(ip, c)) {
343 printk(" Bad EIP value.");
344 break;
345 }
346 if (ip == (u8 *)regs->ip)
347 printk("<%02x> ", c);
348 else
349 printk("%02x ", c);
350 }
351 }
352 printk("\n");
353}
354
355int is_valid_bugaddr(unsigned long ip)
356{
357 unsigned short ud2;
358
359 if (ip < PAGE_OFFSET)
360 return 0;
361 if (probe_kernel_address((unsigned short *)ip, ud2))
362 return 0;
363
364 return ud2 == 0x0b0f;
365}
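The 0x0b0f constant is the two ud2 opcode bytes 0f 0b read back as a little-endian unsigned short. A stand-alone check (the assert only holds on a little-endian host):

#include <assert.h>
#include <string.h>

int main(void)
{
	const unsigned char ud2[2] = { 0x0f, 0x0b };
	unsigned short v;

	memcpy(&v, ud2, sizeof(v));	/* same width of load the kernel does */
	assert(v == 0x0b0f);		/* bytes swap on little-endian */
	return 0;
}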
366
367static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
368static int die_owner = -1;
369static unsigned int die_nest_count;
370
371unsigned __kprobes long oops_begin(void)
372{
373 unsigned long flags;
374
375 oops_enter();
376
377 if (die_owner != raw_smp_processor_id()) {
378 console_verbose();
379 raw_local_irq_save(flags);
380 __raw_spin_lock(&die_lock);
381 die_owner = smp_processor_id();
382 die_nest_count = 0;
383 bust_spinlocks(1);
384 } else {
385 raw_local_irq_save(flags);
386 }
387 die_nest_count++;
388 return flags;
389}
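The owner check is what lets a CPU that oopses while already printing an oops proceed instead of self-deadlocking on die_lock. The same idea in plain pthreads, as a hedged sketch (all names illustrative; the unlocked owner test is racy by design, just as in the kernel, since a stale value only costs a blocking lock):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;
static int owned;
static unsigned int nest;

static void enter(void)
{
	if (!owned || !pthread_equal(owner, pthread_self())) {
		pthread_mutex_lock(&lock);
		owner = pthread_self();
		owned = 1;
		nest = 0;
	}
	nest++;			/* re-entry just deepens the nest count */
}

int main(void)
{
	enter();		/* first entry takes the lock */
	enter();		/* same thread nests instead of deadlocking */
	return (int)nest;	/* 2 */
}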
390
391void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
392{
393 bust_spinlocks(0);
394 die_owner = -1;
395 add_taint(TAINT_DIE);
396 __raw_spin_unlock(&die_lock);
397 raw_local_irq_restore(flags);
398
399 if (!regs)
400 return;
401
402 if (kexec_should_crash(current))
403 crash_kexec(regs);
404
405 if (in_interrupt())
406 panic("Fatal exception in interrupt");
407
408 if (panic_on_oops)
409 panic("Fatal exception");
410
411 oops_exit();
412 do_exit(signr);
413}
414
415int __kprobes __die(const char *str, struct pt_regs *regs, long err)
416{
417 unsigned short ss;
418 unsigned long sp;
419 155
420 printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
421#ifdef CONFIG_PREEMPT
422 printk("PREEMPT ");
423#endif
424#ifdef CONFIG_SMP
425 printk("SMP ");
426#endif
427#ifdef CONFIG_DEBUG_PAGEALLOC
428 printk("DEBUG_PAGEALLOC");
429#endif
430 printk("\n");
431 if (notify_die(DIE_OOPS, str, regs, err,
432 current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
433 return 1;
156 return 1;
434
435 show_registers(regs);
436 /* Executive summary in case the oops scrolled away */
437 sp = (unsigned long) (&regs->sp);
438 savesegment(ss, ss);
439 if (user_mode(regs)) {
440 sp = regs->sp;
441 ss = regs->ss & 0xffff;
442 }
157 }
443 printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
158 put_cpu();
444 print_symbol("%s", regs->ip);
445 printk(" SS:ESP %04x:%08lx\n", ss, sp);
446 return 0;
447}
448
449/*
450 * This is gone through when something in the kernel has done something bad
451 * and is about to be terminated:
452 */
453void die(const char *str, struct pt_regs *regs, long err)
454{
455 unsigned long flags = oops_begin();
456
457 if (die_nest_count < 3) {
458 report_bug(regs->ip, regs);
459
460 if (__die(str, regs, err))
461 regs = NULL;
462 } else {
463 printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
464 }
465
466 oops_end(flags, regs, SIGSEGV);
467}
468 159
469static inline void
470die_if_kernel(const char *str, struct pt_regs *regs, long err)
471{
472 if (!user_mode_vm(regs))
473 die(str, regs, err);
474}
160 return 0;
161}
162#endif
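The interesting part of lazy_iobitmap_copy() is the copy-then-pad step: the new thread's bitmap is copied in, and any tail the previous owner had mapped is refilled with 0xff (all ports denied). A stand-alone sketch of just that step (buffer sizes and names are illustrative, not the kernel's):

#include <string.h>
#include <assert.h>

#define BITMAP_BYTES 32

int main(void)
{
	unsigned char tss_map[BITMAP_BYTES];
	unsigned char thread_map[8] = { 0 };	/* ports 0-63 allowed */
	unsigned int thread_max = sizeof(thread_map);
	unsigned int tss_max = 16;		/* previous owner mapped more */

	memcpy(tss_map, thread_map, thread_max);
	/* pad the stale tail with 0xff: no access beyond thread_max */
	if (thread_max < tss_max)
		memset(tss_map + thread_max, 0xff, tss_max - thread_max);

	assert(tss_map[thread_max] == 0xff);
	return 0;
}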
475 163
476static void __kprobes 164static void __kprobes
477do_trap(int trapnr, int signr, char *str, int vm86, struct pt_regs *regs,
165do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
478 long error_code, siginfo_t *info) 166 long error_code, siginfo_t *info)
479{ 167{
480 struct task_struct *tsk = current; 168 struct task_struct *tsk = current;
481 169
170#ifdef CONFIG_X86_32
482 if (regs->flags & X86_VM_MASK) { 171 if (regs->flags & X86_VM_MASK) {
483 if (vm86)
172 /*
173 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
174 * On nmi (interrupt 2), do_trap should not be called.
175 */
176 if (trapnr < 6)
484 goto vm86_trap; 177 goto vm86_trap;
485 goto trap_signal; 178 goto trap_signal;
486 } 179 }
180#endif
487 181
488 if (!user_mode(regs)) 182 if (!user_mode(regs))
489 goto kernel_trap; 183 goto kernel_trap;
490 184
185#ifdef CONFIG_X86_32
491trap_signal: 186trap_signal:
187#endif
492 /* 188 /*
493 * We want error_code and trap_no set for userspace faults and 189 * We want error_code and trap_no set for userspace faults and
494 * kernelspace faults which result in die(), but not 190 * kernelspace faults which result in die(), but not
@@ -501,6 +197,18 @@ trap_signal:
501 tsk->thread.error_code = error_code; 197 tsk->thread.error_code = error_code;
502 tsk->thread.trap_no = trapnr; 198 tsk->thread.trap_no = trapnr;
503 199
200#ifdef CONFIG_X86_64
201 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
202 printk_ratelimit()) {
203 printk(KERN_INFO
204 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
205 tsk->comm, tsk->pid, str,
206 regs->ip, regs->sp, error_code);
207 print_vma_addr(" in ", regs->ip);
208 printk("\n");
209 }
210#endif
211
504 if (info) 212 if (info)
505 force_sig_info(signr, info, tsk); 213 force_sig_info(signr, info, tsk);
506 else 214 else
@@ -515,29 +223,29 @@ kernel_trap:
515 } 223 }
516 return; 224 return;
517 225
226#ifdef CONFIG_X86_32
518vm86_trap: 227vm86_trap:
519 if (handle_vm86_trap((struct kernel_vm86_regs *) regs, 228 if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
520 error_code, trapnr)) 229 error_code, trapnr))
521 goto trap_signal; 230 goto trap_signal;
522 return; 231 return;
232#endif
523} 233}
524 234
525#define DO_ERROR(trapnr, signr, str, name) \ 235#define DO_ERROR(trapnr, signr, str, name) \
526void do_##name(struct pt_regs *regs, long error_code) \
236dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
527{ \ 237{ \
528 trace_hardirqs_fixup(); \
529 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 238 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
530 == NOTIFY_STOP) \ 239 == NOTIFY_STOP) \
531 return; \ 240 return; \
532 do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
241 conditional_sti(regs); \
242 do_trap(trapnr, signr, str, regs, error_code, NULL); \
533} 243}
534 244
535#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \
536void do_##name(struct pt_regs *regs, long error_code) \
245#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
246dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
537{ \ 247{ \
538 siginfo_t info; \ 248 siginfo_t info; \
539 if (irq) \
540 local_irq_enable(); \
541 info.si_signo = signr; \ 249 info.si_signo = signr; \
542 info.si_errno = 0; \ 250 info.si_errno = 0; \
543 info.si_code = sicode; \ 251 info.si_code = sicode; \
@@ -545,90 +253,68 @@ void do_##name(struct pt_regs *regs, long error_code) \
545 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 253 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
546 == NOTIFY_STOP) \ 254 == NOTIFY_STOP) \
547 return; \ 255 return; \
548 do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
256 conditional_sti(regs); \
257 do_trap(trapnr, signr, str, regs, error_code, &info); \
549} 258}
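For clarity, here is what the new-side DO_ERROR(4, SIGSEGV, "overflow", overflow) expands to, reconstructed by hand from the macro above (preprocessor output, not text from the patch):

dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "overflow", regs, error_code, 4, SIGSEGV)
			== NOTIFY_STOP)
		return;
	conditional_sti(regs);
	do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
}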
550 259
551#define DO_VM86_ERROR(trapnr, signr, str, name) \
552void do_##name(struct pt_regs *regs, long error_code) \
553{ \
554 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
555 == NOTIFY_STOP) \
556 return; \
557 do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
558}
559
560#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
561void do_##name(struct pt_regs *regs, long error_code) \
562{ \
563 siginfo_t info; \
564 info.si_signo = signr; \
565 info.si_errno = 0; \
566 info.si_code = sicode; \
567 info.si_addr = (void __user *)siaddr; \
568 trace_hardirqs_fixup(); \
569 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
570 == NOTIFY_STOP) \
571 return; \
572 do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
573}
574
575DO_VM86_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
576#ifndef CONFIG_KPROBES
577DO_VM86_ERROR(3, SIGTRAP, "int3", int3)
578#endif
579DO_VM86_ERROR(4, SIGSEGV, "overflow", overflow)
580DO_VM86_ERROR(5, SIGSEGV, "bounds", bounds)
581DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0)
260DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
261DO_ERROR(4, SIGSEGV, "overflow", overflow)
262DO_ERROR(5, SIGSEGV, "bounds", bounds)
263DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
582DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) 264DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
583DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) 265DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
584DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 266DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
267#ifdef CONFIG_X86_32
585DO_ERROR(12, SIGBUS, "stack segment", stack_segment) 268DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
586DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
587DO_ERROR_INFO(32, SIGILL, "iret exception", iret_error, ILL_BADSTK, 0, 1)
269#endif
270DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
271
272#ifdef CONFIG_X86_64
273/* Runs on IST stack */
274dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
275{
276 if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
277 12, SIGBUS) == NOTIFY_STOP)
278 return;
279 preempt_conditional_sti(regs);
280 do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
281 preempt_conditional_cli(regs);
282}
283
284dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
285{
286 static const char str[] = "double fault";
287 struct task_struct *tsk = current;
288
289 /* Return not checked because a double fault cannot be ignored */
290 notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
588 291
589void __kprobes
292 tsk->thread.error_code = error_code;
293 tsk->thread.trap_no = 8;
294
295 /* This is always a kernel trap and never fixable (and thus must
296 never return). */
297 for (;;)
298 die(str, regs, error_code);
299}
300#endif
301
302dotraplinkage void __kprobes
590do_general_protection(struct pt_regs *regs, long error_code) 303do_general_protection(struct pt_regs *regs, long error_code)
591{ 304{
592 struct task_struct *tsk; 305 struct task_struct *tsk;
593 struct thread_struct *thread;
594 struct tss_struct *tss;
595 int cpu;
596 306
307 conditional_sti(regs);
597 cpu = get_cpu();
598 tss = &per_cpu(init_tss, cpu);
599 thread = &current->thread;
600
601 /*
602 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
603 * invalid offset set (the LAZY one) and the faulting thread has
604 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
605 * and we set the offset field correctly. Then we let the CPU to
606 * restart the faulting instruction.
607 */
608 if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
609 thread->io_bitmap_ptr) {
610 memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
611 thread->io_bitmap_max);
612 /*
613 * If the previously set map was extending to higher ports
614 * than the current one, pad extra space with 0xff (no access).
615 */
616 if (thread->io_bitmap_max < tss->io_bitmap_max) {
617 memset((char *) tss->io_bitmap +
618 thread->io_bitmap_max, 0xff,
619 tss->io_bitmap_max - thread->io_bitmap_max);
620 }
621 tss->io_bitmap_max = thread->io_bitmap_max;
622 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
623 tss->io_bitmap_owner = thread;
624 put_cpu();
625 308
309#ifdef CONFIG_X86_32
310 if (lazy_iobitmap_copy()) {
311 /* restart the faulting instruction */
626 return; 312 return;
627 } 313 }
628 put_cpu();
629 314
630 if (regs->flags & X86_VM_MASK) 315 if (regs->flags & X86_VM_MASK)
631 goto gp_in_vm86; 316 goto gp_in_vm86;
317#endif
632 318
633 tsk = current; 319 tsk = current;
634 if (!user_mode(regs)) 320 if (!user_mode(regs))
@@ -650,10 +336,12 @@ do_general_protection(struct pt_regs *regs, long error_code)
650 force_sig(SIGSEGV, tsk); 336 force_sig(SIGSEGV, tsk);
651 return; 337 return;
652 338
339#ifdef CONFIG_X86_32
653gp_in_vm86: 340gp_in_vm86:
654 local_irq_enable(); 341 local_irq_enable();
655 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); 342 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
656 return; 343 return;
344#endif
657 345
658gp_in_kernel: 346gp_in_kernel:
659 if (fixup_exception(regs)) 347 if (fixup_exception(regs))
@@ -690,7 +378,8 @@ mem_parity_error(unsigned char reason, struct pt_regs *regs)
690 printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); 378 printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
691 379
692 /* Clear and disable the memory parity error line. */ 380 /* Clear and disable the memory parity error line. */
693 clear_mem_error(reason);
381 reason = (reason & 0xf) | 4;
382 outb(reason, 0x61);
694} 383}
695 384
696static notrace __kprobes void 385static notrace __kprobes void
@@ -716,7 +405,8 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
716static notrace __kprobes void 405static notrace __kprobes void
717unknown_nmi_error(unsigned char reason, struct pt_regs *regs) 406unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
718{ 407{
719 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
408 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
409 NOTIFY_STOP)
720 return; 410 return;
721#ifdef CONFIG_MCA 411#ifdef CONFIG_MCA
722 /* 412 /*
@@ -739,41 +429,6 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
739 printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); 429 printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
740} 430}
741 431
742static DEFINE_SPINLOCK(nmi_print_lock);
743
744void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
745{
746 if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
747 return;
748
749 spin_lock(&nmi_print_lock);
750 /*
751 * We are in trouble anyway, let's at least try
752 * to get a message out:
753 */
754 bust_spinlocks(1);
755 printk(KERN_EMERG "%s", str);
756 printk(" on CPU%d, ip %08lx, registers:\n",
757 smp_processor_id(), regs->ip);
758 show_registers(regs);
759 if (do_panic)
760 panic("Non maskable interrupt");
761 console_silent();
762 spin_unlock(&nmi_print_lock);
763 bust_spinlocks(0);
764
765 /*
766 * If we are in kernel we are probably nested up pretty bad
767 * and might aswell get out now while we still can:
768 */
769 if (!user_mode_vm(regs)) {
770 current->thread.trap_no = 2;
771 crash_kexec(regs);
772 }
773
774 do_exit(SIGSEGV);
775}
776
777static notrace __kprobes void default_do_nmi(struct pt_regs *regs) 432static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
778{ 433{
779 unsigned char reason = 0; 434 unsigned char reason = 0;
@@ -812,22 +467,25 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
812 mem_parity_error(reason, regs); 467 mem_parity_error(reason, regs);
813 if (reason & 0x40) 468 if (reason & 0x40)
814 io_check_error(reason, regs); 469 io_check_error(reason, regs);
470#ifdef CONFIG_X86_32
815 /* 471 /*
816 * Reassert NMI in case it became active meanwhile 472 * Reassert NMI in case it became active meanwhile
817 * as it's edge-triggered: 473 * as it's edge-triggered:
818 */ 474 */
819 reassert_nmi(); 475 reassert_nmi();
476#endif
820} 477}
821 478
822notrace __kprobes void do_nmi(struct pt_regs *regs, long error_code)
479dotraplinkage notrace __kprobes void
480do_nmi(struct pt_regs *regs, long error_code)
823{ 481{
824 int cpu;
825
826 nmi_enter(); 482 nmi_enter();
827 483
828 cpu = smp_processor_id();
829
830 ++nmi_count(cpu);
484#ifdef CONFIG_X86_32
485 { int cpu; cpu = smp_processor_id(); ++nmi_count(cpu); }
486#else
487 add_pda(__nmi_count, 1);
488#endif
831 489
832 if (!ignore_nmis) 490 if (!ignore_nmis)
833 default_do_nmi(regs); 491 default_do_nmi(regs);
@@ -847,21 +505,44 @@ void restart_nmi(void)
847 acpi_nmi_enable(); 505 acpi_nmi_enable();
848} 506}
849 507
850#ifdef CONFIG_KPROBES
851void __kprobes do_int3(struct pt_regs *regs, long error_code)
852{
853 trace_hardirqs_fixup();
854
855 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
856 == NOTIFY_STOP)
857 return;
858 /*
859 * This is an interrupt gate, because kprobes wants interrupts
860 * disabled. Normal trap handlers don't.
861 */
862 restore_interrupts(regs);
863
864 do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
508/* May run on IST stack. */
509dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
510{
511#ifdef CONFIG_KPROBES
512 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
513 == NOTIFY_STOP)
514 return;
515#else
516 if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
517 == NOTIFY_STOP)
518 return;
519#endif
520
521 preempt_conditional_sti(regs);
522 do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
523 preempt_conditional_cli(regs);
524}
525
526#ifdef CONFIG_X86_64
527/* Help handler running on IST stack to switch back to user stack
528 for scheduling or signal handling. The actual stack switch is done in
529 entry.S */
530asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
531{
532 struct pt_regs *regs = eregs;
533 /* Already synced */
534 if (eregs == (struct pt_regs *)eregs->sp)
535 ;
536 /* Exception from user space */
537 else if (user_mode(eregs))
538 regs = task_pt_regs(current);
539 /* Exception from kernel and interrupts are enabled. Move to
540 kernel process stack. */
541 else if (eregs->flags & X86_EFLAGS_IF)
542 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
543 if (eregs != regs)
544 *regs = *eregs;
545 return regs;
865} 546}
866#endif 547#endif
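The pointer arithmetic in sync_regs() is easy to misread: it carves a pt_regs-sized slot out of the interrupted kernel stack and copies the IST frame into it, so the handler can return through the normal process stack. The same step in isolation (move_to_process_stack is a made-up name for illustration):

static struct pt_regs *move_to_process_stack(struct pt_regs *eregs)
{
	struct pt_regs *regs;

	eregs->sp -= sizeof(struct pt_regs);	/* reserve a slot below sp */
	regs = (struct pt_regs *)eregs->sp;
	*regs = *eregs;				/* relocate the frame */
	return regs;
}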
867 548
@@ -886,13 +567,14 @@ void __kprobes do_int3(struct pt_regs *regs, long error_code)
886 * about restoring all the debug state, and ptrace doesn't have to 567 * about restoring all the debug state, and ptrace doesn't have to
887 * find every occurrence of the TF bit that could be saved away even 568 * find every occurrence of the TF bit that could be saved away even
888 * by user code) 569 * by user code)
570 *
571 * May run on IST stack.
889 */ 572 */
890void __kprobes do_debug(struct pt_regs *regs, long error_code)
573dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
891{ 574{
892 struct task_struct *tsk = current; 575 struct task_struct *tsk = current;
576 unsigned long condition;
577 int si_code;
893 unsigned int condition;
894
895 trace_hardirqs_fixup();
896 578
897 get_debugreg(condition, 6); 579 get_debugreg(condition, 6);
898 580
@@ -905,9 +587,9 @@ void __kprobes do_debug(struct pt_regs *regs, long error_code)
905 if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, 587 if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
906 SIGTRAP) == NOTIFY_STOP) 588 SIGTRAP) == NOTIFY_STOP)
907 return; 589 return;
590
908 /* It's safe to allow irq's after DR6 has been saved */ 591 /* It's safe to allow irq's after DR6 has been saved */
592 preempt_conditional_sti(regs);
909 if (regs->flags & X86_EFLAGS_IF)
910 local_irq_enable();
911 593
912 /* Mask out spurious debug traps due to lazy DR7 setting */ 594 /* Mask out spurious debug traps due to lazy DR7 setting */
913 if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { 595 if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
@@ -915,8 +597,10 @@ void __kprobes do_debug(struct pt_regs *regs, long error_code)
915 goto clear_dr7; 597 goto clear_dr7;
916 } 598 }
917 599
600#ifdef CONFIG_X86_32
918 if (regs->flags & X86_VM_MASK) 601 if (regs->flags & X86_VM_MASK)
919 goto debug_vm86; 602 goto debug_vm86;
603#endif
920 604
921 /* Save debug status register where ptrace can see it */ 605 /* Save debug status register where ptrace can see it */
922 tsk->thread.debugreg6 = condition; 606 tsk->thread.debugreg6 = condition;
@@ -926,17 +610,13 @@ void __kprobes do_debug(struct pt_regs *regs, long error_code)
926 * kernel space (but re-enable TF when returning to user mode). 610 * kernel space (but re-enable TF when returning to user mode).
927 */ 611 */
928 if (condition & DR_STEP) { 612 if (condition & DR_STEP) {
929 /*
930 * We already checked v86 mode above, so we can
931 * check for kernel mode by just checking the CPL
932 * of CS.
933 */
934 if (!user_mode(regs)) 613 if (!user_mode(regs))
935 goto clear_TF_reenable; 614 goto clear_TF_reenable;
936 } 615 }
937 616
617 si_code = get_si_code(condition);
938 /* Ok, finally something we can handle */ 618 /* Ok, finally something we can handle */
939 send_sigtrap(tsk, regs, error_code);
619 send_sigtrap(tsk, regs, error_code, si_code);
940 620
941 /* 621 /*
942 * Disable additional traps. They'll be re-enabled when 622 * Disable additional traps. They'll be re-enabled when
@@ -944,18 +624,37 @@ void __kprobes do_debug(struct pt_regs *regs, long error_code)
944 */ 624 */
945clear_dr7: 625clear_dr7:
946 set_debugreg(0, 7); 626 set_debugreg(0, 7);
627 preempt_conditional_cli(regs);
947 return; 628 return;
948 629
630#ifdef CONFIG_X86_32
949debug_vm86: 631debug_vm86:
950 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); 632 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
633 preempt_conditional_cli(regs);
951 return; 634 return;
635#endif
952 636
953clear_TF_reenable: 637clear_TF_reenable:
954 set_tsk_thread_flag(tsk, TIF_SINGLESTEP); 638 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
955 regs->flags &= ~X86_EFLAGS_TF; 639 regs->flags &= ~X86_EFLAGS_TF;
640 preempt_conditional_cli(regs);
956 return; 641 return;
957} 642}
958 643
644#ifdef CONFIG_X86_64
645static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
646{
647 if (fixup_exception(regs))
648 return 1;
649
650 notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
651 /* Illegal floating point operation in the kernel */
652 current->thread.trap_no = trapnr;
653 die(str, regs, 0);
654 return 0;
655}
656#endif
657
959/* 658/*
960 * Note that we play around with the 'TS' bit in an attempt to get 659 * Note that we play around with the 'TS' bit in an attempt to get
961 * the correct behaviour even in the presence of the asynchronous 660 * the correct behaviour even in the presence of the asynchronous
@@ -992,7 +691,9 @@ void math_error(void __user *ip)
992 swd = get_fpu_swd(task); 691 swd = get_fpu_swd(task);
993 switch (swd & ~cwd & 0x3f) { 692 switch (swd & ~cwd & 0x3f) {
994 case 0x000: /* No unmasked exception */ 693 case 0x000: /* No unmasked exception */
694#ifdef CONFIG_X86_32
995 return; 695 return;
696#endif
996 default: /* Multiple exceptions */ 697 default: /* Multiple exceptions */
997 break; 698 break;
998 case 0x001: /* Invalid Op */ 699 case 0x001: /* Invalid Op */
@@ -1020,9 +721,18 @@ void math_error(void __user *ip)
1020 force_sig_info(SIGFPE, &info, task); 721 force_sig_info(SIGFPE, &info, task);
1021} 722}
1022 723
1023void do_coprocessor_error(struct pt_regs *regs, long error_code)
724dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
1024{ 725{
726 conditional_sti(regs);
727
728#ifdef CONFIG_X86_32
1025 ignore_fpu_irq = 1; 729 ignore_fpu_irq = 1;
730#else
731 if (!user_mode(regs) &&
732 kernel_math_error(regs, "kernel x87 math error", 16))
733 return;
734#endif
735
1026 math_error((void __user *)regs->ip); 736 math_error((void __user *)regs->ip);
1027} 737}
1028 738
@@ -1074,8 +784,12 @@ static void simd_math_error(void __user *ip)
1074 force_sig_info(SIGFPE, &info, task); 784 force_sig_info(SIGFPE, &info, task);
1075} 785}
1076 786
1077void do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
787dotraplinkage void
788do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
1078{ 789{
790 conditional_sti(regs);
791
792#ifdef CONFIG_X86_32
1079 if (cpu_has_xmm) { 793 if (cpu_has_xmm) {
1080 /* Handle SIMD FPU exceptions on PIII+ processors. */ 794 /* Handle SIMD FPU exceptions on PIII+ processors. */
1081 ignore_fpu_irq = 1; 795 ignore_fpu_irq = 1;
@@ -1094,16 +808,25 @@ void do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
1094 current->thread.error_code = error_code; 808 current->thread.error_code = error_code;
1095 die_if_kernel("cache flush denied", regs, error_code); 809 die_if_kernel("cache flush denied", regs, error_code);
1096 force_sig(SIGSEGV, current); 810 force_sig(SIGSEGV, current);
811#else
812 if (!user_mode(regs) &&
813 kernel_math_error(regs, "kernel simd math error", 19))
814 return;
815 simd_math_error((void __user *)regs->ip);
816#endif
1097} 817}
1098 818
1099void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
819dotraplinkage void
820do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
1100{ 821{
822 conditional_sti(regs);
1101#if 0 823#if 0
1102 /* No need to warn about this any longer. */ 824 /* No need to warn about this any longer. */
1103 printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); 825 printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
1104#endif 826#endif
1105} 827}
1106 828
829#ifdef CONFIG_X86_32
1107unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp) 830unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
1108{ 831{
1109 struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id()); 832 struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
@@ -1122,6 +845,15 @@ unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
1122 845
1123 return new_kesp; 846 return new_kesp;
1124} 847}
848#else
849asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
850{
851}
852
853asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
854{
855}
856#endif
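The two empty handlers above use the classic weak-symbol hook pattern: a strong definition elsewhere (the MCE code, when built in) overrides the weak default at link time, so callers never need an #ifdef. The pattern in miniature, with illustrative names:

#include <stdio.h>

void hook(void) __attribute__((weak));
void hook(void) { }	/* weak default: silently do nothing */

int main(void)
{
	hook();		/* a strong hook() in another object wins */
	puts("done");
	return 0;
}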
1125 857
1126/* 858/*
1127 * 'math_state_restore()' saves the current math information in the 859 * 'math_state_restore()' saves the current math information in the
@@ -1154,14 +886,24 @@ asmlinkage void math_state_restore(void)
1154 } 886 }
1155 887
1156 clts(); /* Allow maths ops (or we recurse) */ 888 clts(); /* Allow maths ops (or we recurse) */
889#ifdef CONFIG_X86_32
1157 restore_fpu(tsk); 890 restore_fpu(tsk);
891#else
892 /*
893 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
894 */
895 if (unlikely(restore_fpu_checking(tsk))) {
896 stts();
897 force_sig(SIGSEGV, tsk);
898 return;
899 }
900#endif
1158 thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ 901 thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
1159 tsk->fpu_counter++; 902 tsk->fpu_counter++;
1160} 903}
1161EXPORT_SYMBOL_GPL(math_state_restore); 904EXPORT_SYMBOL_GPL(math_state_restore);
1162 905
1163#ifndef CONFIG_MATH_EMULATION 906#ifndef CONFIG_MATH_EMULATION
1164
1165asmlinkage void math_emulate(long arg) 907asmlinkage void math_emulate(long arg)
1166{ 908{
1167 printk(KERN_EMERG 909 printk(KERN_EMERG
@@ -1170,12 +912,54 @@ asmlinkage void math_emulate(long arg)
1170 force_sig(SIGFPE, current); 912 force_sig(SIGFPE, current);
1171 schedule(); 913 schedule();
1172} 914}
1173
1174#endif /* CONFIG_MATH_EMULATION */ 915#endif /* CONFIG_MATH_EMULATION */
1175 916
917dotraplinkage void __kprobes
918do_device_not_available(struct pt_regs *regs, long error)
919{
920#ifdef CONFIG_X86_32
921 if (read_cr0() & X86_CR0_EM) {
922 conditional_sti(regs);
923 math_emulate(0);
924 } else {
925 math_state_restore(); /* interrupts still off */
926 conditional_sti(regs);
927 }
928#else
929 math_state_restore();
930#endif
931}
932
933#ifdef CONFIG_X86_32
934#ifdef CONFIG_X86_MCE
935dotraplinkage void __kprobes do_machine_check(struct pt_regs *regs, long error)
936{
937 conditional_sti(regs);
938 machine_check_vector(regs, error);
939}
940#endif
941
942dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
943{
944 siginfo_t info;
945 local_irq_enable();
946
947 info.si_signo = SIGILL;
948 info.si_errno = 0;
949 info.si_code = ILL_BADSTK;
950 info.si_addr = 0;
951 if (notify_die(DIE_TRAP, "iret exception",
952 regs, error_code, 32, SIGILL) == NOTIFY_STOP)
953 return;
954 do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
955}
956#endif
957
1176void __init trap_init(void) 958void __init trap_init(void)
1177{ 959{
960#ifdef CONFIG_X86_32
1178 int i; 961 int i;
962#endif
1179 963
1180#ifdef CONFIG_EISA 964#ifdef CONFIG_EISA
1181 void __iomem *p = early_ioremap(0x0FFFD9, 4); 965 void __iomem *p = early_ioremap(0x0FFFD9, 4);
@@ -1185,29 +969,40 @@ void __init trap_init(void)
1185 early_iounmap(p, 4); 969 early_iounmap(p, 4);
1186#endif 970#endif
1187 971
1188 set_trap_gate(0, &divide_error);
1189 set_intr_gate(1, &debug);
1190 set_intr_gate(2, &nmi);
1191 set_system_intr_gate(3, &int3); /* int3 can be called from all */
1192 set_system_gate(4, &overflow); /* int4 can be called from all */
1193 set_trap_gate(5, &bounds);
1194 set_trap_gate(6, &invalid_op);
1195 set_trap_gate(7, &device_not_available);
1196 set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
1197 set_trap_gate(9, &coprocessor_segment_overrun);
1198 set_trap_gate(10, &invalid_TSS);
1199 set_trap_gate(11, &segment_not_present);
1200 set_trap_gate(12, &stack_segment);
1201 set_trap_gate(13, &general_protection);
1202 set_intr_gate(14, &page_fault);
1203 set_trap_gate(15, &spurious_interrupt_bug);
1204 set_trap_gate(16, &coprocessor_error);
1205 set_trap_gate(17, &alignment_check);
1206#ifdef CONFIG_X86_MCE
1207 set_trap_gate(18, &machine_check);
1208#endif
1209 set_trap_gate(19, &simd_coprocessor_error);
972 set_intr_gate(0, &divide_error);
973 set_intr_gate_ist(1, &debug, DEBUG_STACK);
974 set_intr_gate_ist(2, &nmi, NMI_STACK);
975 /* int3 can be called from all */
976 set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
977 /* int4 can be called from all */
978 set_system_intr_gate(4, &overflow);
979 set_intr_gate(5, &bounds);
980 set_intr_gate(6, &invalid_op);
981 set_intr_gate(7, &device_not_available);
982#ifdef CONFIG_X86_32
983 set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
984#else
985 set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
986#endif
987 set_intr_gate(9, &coprocessor_segment_overrun);
988 set_intr_gate(10, &invalid_TSS);
989 set_intr_gate(11, &segment_not_present);
990 set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
991 set_intr_gate(13, &general_protection);
992 set_intr_gate(14, &page_fault);
993 set_intr_gate(15, &spurious_interrupt_bug);
994 set_intr_gate(16, &coprocessor_error);
995 set_intr_gate(17, &alignment_check);
996#ifdef CONFIG_X86_MCE
997 set_intr_gate_ist(18, &machine_check, MCE_STACK);
998#endif
999 set_intr_gate(19, &simd_coprocessor_error);
1210 1000
1001#ifdef CONFIG_IA32_EMULATION
1002 set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
1003#endif
1004
1005#ifdef CONFIG_X86_32
1211 if (cpu_has_fxsr) { 1006 if (cpu_has_fxsr) {
1212 printk(KERN_INFO "Enabling fast FPU save and restore... "); 1007 printk(KERN_INFO "Enabling fast FPU save and restore... ");
1213 set_in_cr4(X86_CR4_OSFXSR); 1008 set_in_cr4(X86_CR4_OSFXSR);
@@ -1220,37 +1015,20 @@ void __init trap_init(void)
1220 printk("done.\n"); 1015 printk("done.\n");
1221 } 1016 }
1222 1017
1223 set_system_gate(SYSCALL_VECTOR, &system_call);
1018 set_system_trap_gate(SYSCALL_VECTOR, &system_call);
1224 1019
1225 /* Reserve all the builtin and the syscall vector: */ 1020 /* Reserve all the builtin and the syscall vector: */
1226 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 1021 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
1227 set_bit(i, used_vectors); 1022 set_bit(i, used_vectors);
1228 1023
1229 set_bit(SYSCALL_VECTOR, used_vectors); 1024 set_bit(SYSCALL_VECTOR, used_vectors);
1230 1025#endif
1231 init_thread_xstate();
1232 /* 1026 /*
1233 * Should be a barrier for any external CPU state: 1027 * Should be a barrier for any external CPU state:
1234 */ 1028 */
1235 cpu_init(); 1029 cpu_init();
1236 1030
1031#ifdef CONFIG_X86_32
1237 trap_init_hook(); 1032 trap_init_hook();
1033#endif
1238} 1034}
1239
1240static int __init kstack_setup(char *s)
1241{
1242 kstack_depth_to_print = simple_strtoul(s, NULL, 0);
1243
1244 return 1;
1245}
1246__setup("kstack=", kstack_setup);
1247
1248static int __init code_bytes_setup(char *s)
1249{
1250 code_bytes = simple_strtoul(s, NULL, 0);
1251 if (code_bytes > 8192)
1252 code_bytes = 8192;
1253
1254 return 1;
1255}
1256__setup("code_bytes=", code_bytes_setup);
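The deleted kstack= and code_bytes= handlers are not lost; they presumably move with the stack-dumping code this series splits out. For reference, the boot-parameter pattern they use (example_setup and example_val are illustrative names, not from this patch):

static unsigned int example_val;

static int __init example_setup(char *s)
{
	example_val = simple_strtoul(s, NULL, 0);
	return 1;	/* 1 == parameter consumed */
}
__setup("example=", example_setup);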
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
deleted file mode 100644
index 513caaca7115..000000000000
--- a/arch/x86/kernel/traps_64.c
+++ /dev/null
@@ -1,1212 +0,0 @@
1/*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4 *
5 * Pentium III FXSR, SSE support
6 * Gareth Hughes <gareth@valinux.com>, May 2000
7 */
8
9/*
10 * 'Traps.c' handles hardware traps and faults after we have saved some
11 * state in 'entry.S'.
12 */
13#include <linux/moduleparam.h>
14#include <linux/interrupt.h>
15#include <linux/kallsyms.h>
16#include <linux/spinlock.h>
17#include <linux/kprobes.h>
18#include <linux/uaccess.h>
19#include <linux/utsname.h>
20#include <linux/kdebug.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/ptrace.h>
24#include <linux/string.h>
25#include <linux/unwind.h>
26#include <linux/delay.h>
27#include <linux/errno.h>
28#include <linux/kexec.h>
29#include <linux/sched.h>
30#include <linux/timer.h>
31#include <linux/init.h>
32#include <linux/bug.h>
33#include <linux/nmi.h>
34#include <linux/mm.h>
35
36#if defined(CONFIG_EDAC)
37#include <linux/edac.h>
38#endif
39
40#include <asm/stacktrace.h>
41#include <asm/processor.h>
42#include <asm/debugreg.h>
43#include <asm/atomic.h>
44#include <asm/system.h>
45#include <asm/unwind.h>
46#include <asm/desc.h>
47#include <asm/i387.h>
48#include <asm/nmi.h>
49#include <asm/smp.h>
50#include <asm/io.h>
51#include <asm/pgalloc.h>
52#include <asm/proto.h>
53#include <asm/pda.h>
54#include <asm/traps.h>
55
56#include <mach_traps.h>
57
58int panic_on_unrecovered_nmi;
59int kstack_depth_to_print = 12;
60static unsigned int code_bytes = 64;
61static int ignore_nmis;
62static int die_counter;
63
64static inline void conditional_sti(struct pt_regs *regs)
65{
66 if (regs->flags & X86_EFLAGS_IF)
67 local_irq_enable();
68}
69
70static inline void preempt_conditional_sti(struct pt_regs *regs)
71{
72 inc_preempt_count();
73 if (regs->flags & X86_EFLAGS_IF)
74 local_irq_enable();
75}
76
77static inline void preempt_conditional_cli(struct pt_regs *regs)
78{
79 if (regs->flags & X86_EFLAGS_IF)
80 local_irq_disable();
81 /* Make sure to not schedule here because we could be running
82 on an exception stack. */
83 dec_preempt_count();
84}
85
86void printk_address(unsigned long address, int reliable)
87{
88 printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address);
89}
90
91static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
92 unsigned *usedp, char **idp)
93{
94 static char ids[][8] = {
95 [DEBUG_STACK - 1] = "#DB",
96 [NMI_STACK - 1] = "NMI",
97 [DOUBLEFAULT_STACK - 1] = "#DF",
98 [STACKFAULT_STACK - 1] = "#SS",
99 [MCE_STACK - 1] = "#MC",
100#if DEBUG_STKSZ > EXCEPTION_STKSZ
101 [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
102#endif
103 };
104 unsigned k;
105
106 /*
107 * Iterate over all exception stacks, and figure out whether
108 * 'stack' is in one of them:
109 */
110 for (k = 0; k < N_EXCEPTION_STACKS; k++) {
111 unsigned long end = per_cpu(orig_ist, cpu).ist[k];
112 /*
113 * Is 'stack' above this exception frame's end?
114 * If yes then skip to the next frame.
115 */
116 if (stack >= end)
117 continue;
118 /*
119 * Is 'stack' above this exception frame's start address?
120 * If yes then we found the right frame.
121 */
122 if (stack >= end - EXCEPTION_STKSZ) {
123 /*
124 * Make sure we only iterate through an exception
125 * stack once. If it comes up for the second time
126 * then there's something wrong going on - just
127 * break out and return NULL:
128 */
129 if (*usedp & (1U << k))
130 break;
131 *usedp |= 1U << k;
132 *idp = ids[k];
133 return (unsigned long *)end;
134 }
135 /*
136 * If this is a debug stack, and if it has a larger size than
137 * the usual exception stacks, then 'stack' might still
138 * be within the lower portion of the debug stack:
139 */
140#if DEBUG_STKSZ > EXCEPTION_STKSZ
141 if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
142 unsigned j = N_EXCEPTION_STACKS - 1;
143
144 /*
145 * Black magic. A large debug stack is composed of
146 * multiple exception stack entries, which we
147 * iterate through now. Don't look:
148 */
149 do {
150 ++j;
151 end -= EXCEPTION_STKSZ;
152 ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
153 } while (stack < end - EXCEPTION_STKSZ);
154 if (*usedp & (1U << j))
155 break;
156 *usedp |= 1U << j;
157 *idp = ids[j];
158 return (unsigned long *)end;
159 }
160#endif
161 }
162 return NULL;
163}
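The *usedp bookkeeping is a visit-once guard: each exception stack sets one bit, so a corrupted linkage that leads back into the same stack breaks the walk instead of looping. The guard in isolation, runnable stand-alone:

#include <stdio.h>

static int visit_once(unsigned *usedp, unsigned k)
{
	if (*usedp & (1U << k))
		return 0;	/* seen before: refuse to walk it again */
	*usedp |= 1U << k;
	return 1;
}

int main(void)
{
	unsigned used = 0;
	int first = visit_once(&used, 3);
	int second = visit_once(&used, 3);

	printf("%d %d\n", first, second);	/* prints "1 0" */
	return 0;
}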
164
165/*
166 * x86-64 can have up to three kernel stacks:
167 * process stack
168 * interrupt stack
169 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
170 */
171
172static inline int valid_stack_ptr(struct thread_info *tinfo,
173 void *p, unsigned int size, void *end)
174{
175 void *t = tinfo;
176 if (end) {
177 if (p < end && p >= (end-THREAD_SIZE))
178 return 1;
179 else
180 return 0;
181 }
182 return p > t && p < t + THREAD_SIZE - size;
183}
184
185/* The form of the top of the frame on the stack */
186struct stack_frame {
187 struct stack_frame *next_frame;
188 unsigned long return_address;
189};
190
191static inline unsigned long
192print_context_stack(struct thread_info *tinfo,
193 unsigned long *stack, unsigned long bp,
194 const struct stacktrace_ops *ops, void *data,
195 unsigned long *end)
196{
197 struct stack_frame *frame = (struct stack_frame *)bp;
198
199 while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
200 unsigned long addr;
201
202 addr = *stack;
203 if (__kernel_text_address(addr)) {
204 if ((unsigned long) stack == bp + 8) {
205 ops->address(data, addr, 1);
206 frame = frame->next_frame;
207 bp = (unsigned long) frame;
208 } else {
209 ops->address(data, addr, bp == 0);
210 }
211 }
212 stack++;
213 }
214 return bp;
215}
216
217void dump_trace(struct task_struct *task, struct pt_regs *regs,
218 unsigned long *stack, unsigned long bp,
219 const struct stacktrace_ops *ops, void *data)
220{
221 const unsigned cpu = get_cpu();
222 unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
223 unsigned used = 0;
224 struct thread_info *tinfo;
225
226 if (!task)
227 task = current;
228
229 if (!stack) {
230 unsigned long dummy;
231 stack = &dummy;
232 if (task && task != current)
233 stack = (unsigned long *)task->thread.sp;
234 }
235
236#ifdef CONFIG_FRAME_POINTER
237 if (!bp) {
238 if (task == current) {
239 /* Grab bp right from our regs */
240 asm("movq %%rbp, %0" : "=r" (bp) :);
241 } else {
242 /* bp is the last reg pushed by switch_to */
243 bp = *(unsigned long *) task->thread.sp;
244 }
245 }
246#endif
247
248 /*
249 * Print function call entries in all stacks, starting at the
250 * current stack address, descending through any nested
251 * exception and IRQ stacks in order:
252 */
253 tinfo = task_thread_info(task);
254 for (;;) {
255 char *id;
256 unsigned long *estack_end;
257 estack_end = in_exception_stack(cpu, (unsigned long)stack,
258 &used, &id);
259
260 if (estack_end) {
261 if (ops->stack(data, id) < 0)
262 break;
263
264 bp = print_context_stack(tinfo, stack, bp, ops,
265 data, estack_end);
266 ops->stack(data, "<EOE>");
267 /*
268 * We link to the next stack via the
269 * second-to-last pointer (index -2 to end) in the
270 * exception stack:
271 */
272 stack = (unsigned long *) estack_end[-2];
273 continue;
274 }
275 if (irqstack_end) {
276 unsigned long *irqstack;
277 irqstack = irqstack_end -
278 (IRQSTACKSIZE - 64) / sizeof(*irqstack);
279
280 if (stack >= irqstack && stack < irqstack_end) {
281 if (ops->stack(data, "IRQ") < 0)
282 break;
283 bp = print_context_stack(tinfo, stack, bp,
284 ops, data, irqstack_end);
285 /*
286 * We link to the next stack (which would be
287 * the process stack normally) the last
288 * pointer (index -1 to end) in the IRQ stack:
289 */
290 stack = (unsigned long *) (irqstack_end[-1]);
291 irqstack_end = NULL;
292 ops->stack(data, "EOI");
293 continue;
294 }
295 }
296 break;
297 }
298
299 /*
300 * This handles the process stack:
301 */
302 bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
303 put_cpu();
304}
305EXPORT_SYMBOL(dump_trace);
306
307static void
308print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
309{
310 print_symbol(msg, symbol);
311 printk("\n");
312}
313
314static void print_trace_warning(void *data, char *msg)
315{
316 printk("%s\n", msg);
317}
318
319static int print_trace_stack(void *data, char *name)
320{
321 printk(" <%s> ", name);
322 return 0;
323}
324
325static void print_trace_address(void *data, unsigned long addr, int reliable)
326{
327 touch_nmi_watchdog();
328 printk_address(addr, reliable);
329}
330
331static const struct stacktrace_ops print_trace_ops = {
332 .warning = print_trace_warning,
333 .warning_symbol = print_trace_warning_symbol,
334 .stack = print_trace_stack,
335 .address = print_trace_address,
336};
337
338static void
339show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
340 unsigned long *stack, unsigned long bp, char *log_lvl)
341{
342 printk("\nCall Trace:\n");
343 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
344 printk("\n");
345}
346
347void show_trace(struct task_struct *task, struct pt_regs *regs,
348 unsigned long *stack, unsigned long bp)
349{
350 show_trace_log_lvl(task, regs, stack, bp, "");
351}
352
353static void
354show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
355 unsigned long *sp, unsigned long bp, char *log_lvl)
356{
357 unsigned long *stack;
358 int i;
359 const int cpu = smp_processor_id();
360 unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
361 unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
362
363 // debugging aid: "show_stack(NULL, NULL);" prints the
364 // back trace for this cpu.
365
366 if (sp == NULL) {
367 if (task)
368 sp = (unsigned long *)task->thread.sp;
369 else
370 sp = (unsigned long *)&sp;
371 }
372
373 stack = sp;
374 for (i = 0; i < kstack_depth_to_print; i++) {
375 if (stack >= irqstack && stack <= irqstack_end) {
376 if (stack == irqstack_end) {
377 stack = (unsigned long *) (irqstack_end[-1]);
378 printk(" <EOI> ");
379 }
380 } else {
381 if (((long) stack & (THREAD_SIZE-1)) == 0)
382 break;
383 }
384 if (i && ((i % 4) == 0))
385 printk("\n");
386 printk(" %016lx", *stack++);
387 touch_nmi_watchdog();
388 }
389 show_trace_log_lvl(task, regs, sp, bp, log_lvl);
390}
391
392void show_stack(struct task_struct *task, unsigned long *sp)
393{
394 show_stack_log_lvl(task, NULL, sp, 0, "");
395}
396
397/*
398 * The architecture-independent dump_stack generator
399 */
400void dump_stack(void)
401{
402 unsigned long bp = 0;
403 unsigned long stack;
404
405#ifdef CONFIG_FRAME_POINTER
406 if (!bp)
407 asm("movq %%rbp, %0" : "=r" (bp):);
408#endif
409
410 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
411 current->pid, current->comm, print_tainted(),
412 init_utsname()->release,
413 (int)strcspn(init_utsname()->version, " "),
414 init_utsname()->version);
415 show_trace(NULL, NULL, &stack, bp);
416}
417
418EXPORT_SYMBOL(dump_stack);
419
420void show_registers(struct pt_regs *regs)
421{
422 int i;
423 unsigned long sp;
424 const int cpu = smp_processor_id();
425 struct task_struct *cur = cpu_pda(cpu)->pcurrent;
426
427 sp = regs->sp;
428 printk("CPU %d ", cpu);
429 __show_regs(regs);
430 printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
431 cur->comm, cur->pid, task_thread_info(cur), cur);
432
433 /*
434 * When in-kernel, we also print out the stack and code at the
435 * time of the fault..
436 */
437 if (!user_mode(regs)) {
438 unsigned int code_prologue = code_bytes * 43 / 64;
439 unsigned int code_len = code_bytes;
440 unsigned char c;
441 u8 *ip;
442
443 printk("Stack: ");
444 show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
445 regs->bp, "");
446 printk("\n");
447
448 printk(KERN_EMERG "Code: ");
449
450 ip = (u8 *)regs->ip - code_prologue;
451 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
452 /* try starting at RIP */
453 ip = (u8 *)regs->ip;
454 code_len = code_len - code_prologue + 1;
455 }
456 for (i = 0; i < code_len; i++, ip++) {
457 if (ip < (u8 *)PAGE_OFFSET ||
458 probe_kernel_address(ip, c)) {
459 printk(" Bad RIP value.");
460 break;
461 }
462 if (ip == (u8 *)regs->ip)
463 printk("<%02x> ", c);
464 else
465 printk("%02x ", c);
466 }
467 }
468 printk("\n");
469}
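The 43/64 split above fixes how the Code: line is centered: with the default code_bytes = 64, 43 bytes are dumped before the faulting instruction and 21 from it onward. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned int code_bytes = 64;
	unsigned int code_prologue = code_bytes * 43 / 64;

	printf("before ip: %u, from ip on: %u\n",
	       code_prologue, code_bytes - code_prologue);	/* 43, 21 */
	return 0;
}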
470
471int is_valid_bugaddr(unsigned long ip)
472{
473 unsigned short ud2;
474
475 if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
476 return 0;
477
478 return ud2 == 0x0b0f;
479}
480
481static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
482static int die_owner = -1;
483static unsigned int die_nest_count;
484
485unsigned __kprobes long oops_begin(void)
486{
487 int cpu;
488 unsigned long flags;
489
490 oops_enter();
491
492 /* racy, but better than risking deadlock. */
493 raw_local_irq_save(flags);
494 cpu = smp_processor_id();
495 if (!__raw_spin_trylock(&die_lock)) {
496 if (cpu == die_owner)
497 /* nested oops. should stop eventually */;
498 else
499 __raw_spin_lock(&die_lock);
500 }
501 die_nest_count++;
502 die_owner = cpu;
503 console_verbose();
504 bust_spinlocks(1);
505 return flags;
506}
507
508void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
509{
510 die_owner = -1;
511 bust_spinlocks(0);
512 die_nest_count--;
513 if (!die_nest_count)
514 /* Nest count reaches zero, release the lock. */
515 __raw_spin_unlock(&die_lock);
516 raw_local_irq_restore(flags);
517 if (!regs) {
518 oops_exit();
519 return;
520 }
521 if (panic_on_oops)
522 panic("Fatal exception");
523 oops_exit();
524 do_exit(signr);
525}
526
527int __kprobes __die(const char *str, struct pt_regs *regs, long err)
528{
529 printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
530#ifdef CONFIG_PREEMPT
531 printk("PREEMPT ");
532#endif
533#ifdef CONFIG_SMP
534 printk("SMP ");
535#endif
536#ifdef CONFIG_DEBUG_PAGEALLOC
537 printk("DEBUG_PAGEALLOC");
538#endif
539 printk("\n");
540 if (notify_die(DIE_OOPS, str, regs, err,
541 current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
542 return 1;
543
544 show_registers(regs);
545 add_taint(TAINT_DIE);
546 /* Executive summary in case the oops scrolled away */
547 printk(KERN_ALERT "RIP ");
548 printk_address(regs->ip, 1);
549 printk(" RSP <%016lx>\n", regs->sp);
550 if (kexec_should_crash(current))
551 crash_kexec(regs);
552 return 0;
553}
554
555void die(const char *str, struct pt_regs *regs, long err)
556{
557 unsigned long flags = oops_begin();
558
559 if (!user_mode(regs))
560 report_bug(regs->ip, regs);
561
562 if (__die(str, regs, err))
563 regs = NULL;
564 oops_end(flags, regs, SIGSEGV);
565}
566
567notrace __kprobes void
568die_nmi(char *str, struct pt_regs *regs, int do_panic)
569{
570 unsigned long flags;
571
572 if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
573 return;
574
575 flags = oops_begin();
576 /*
577 * We are in trouble anyway, let's at least try
578 * to get a message out.
579 */
580 printk(KERN_EMERG "%s", str);
581 printk(" on CPU%d, ip %08lx, registers:\n",
582 smp_processor_id(), regs->ip);
583 show_registers(regs);
584 if (kexec_should_crash(current))
585 crash_kexec(regs);
586 if (do_panic || panic_on_oops)
587 panic("Non maskable interrupt");
588 oops_end(flags, NULL, SIGBUS);
589 nmi_exit();
590 local_irq_enable();
591 do_exit(SIGBUS);
592}
593
594static void __kprobes
595do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
596 long error_code, siginfo_t *info)
597{
598 struct task_struct *tsk = current;
599
600 if (!user_mode(regs))
601 goto kernel_trap;
602
603 /*
604 * We want error_code and trap_no set for userspace faults and
605 * kernelspace faults which result in die(), but not
606 * kernelspace faults which are fixed up. die() gives the
607 * process no chance to handle the signal and notice the
608 * kernel fault information, so that won't result in polluting
609 * the information about previously queued, but not yet
610 * delivered, faults. See also do_general_protection below.
611 */
612 tsk->thread.error_code = error_code;
613 tsk->thread.trap_no = trapnr;
614
615 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
616 printk_ratelimit()) {
617 printk(KERN_INFO
618 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
619 tsk->comm, tsk->pid, str,
620 regs->ip, regs->sp, error_code);
621 print_vma_addr(" in ", regs->ip);
622 printk("\n");
623 }
624
625 if (info)
626 force_sig_info(signr, info, tsk);
627 else
628 force_sig(signr, tsk);
629 return;
630
631kernel_trap:
632 if (!fixup_exception(regs)) {
633 tsk->thread.error_code = error_code;
634 tsk->thread.trap_no = trapnr;
635 die(str, regs, error_code);
636 }
637 return;
638}
639
640#define DO_ERROR(trapnr, signr, str, name) \
641asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
642{ \
643 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
644 == NOTIFY_STOP) \
645 return; \
646 conditional_sti(regs); \
647 do_trap(trapnr, signr, str, regs, error_code, NULL); \
648}
649
650#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
651asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
652{ \
653 siginfo_t info; \
654 info.si_signo = signr; \
655 info.si_errno = 0; \
656 info.si_code = sicode; \
657 info.si_addr = (void __user *)siaddr; \
658 trace_hardirqs_fixup(); \
659 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
660 == NOTIFY_STOP) \
661 return; \
662 conditional_sti(regs); \
663 do_trap(trapnr, signr, str, regs, error_code, &info); \
664}
665
666DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
667DO_ERROR(4, SIGSEGV, "overflow", overflow)
668DO_ERROR(5, SIGSEGV, "bounds", bounds)
669DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
670DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
671DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
672DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
673DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
674
675/* Runs on IST stack */
676asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
677{
678 if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
679 12, SIGBUS) == NOTIFY_STOP)
680 return;
681 preempt_conditional_sti(regs);
682 do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
683 preempt_conditional_cli(regs);
684}
685
686asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
687{
688 static const char str[] = "double fault";
689 struct task_struct *tsk = current;
690
691 /* Return not checked because a double fault cannot be ignored */
692 notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
693
694 tsk->thread.error_code = error_code;
695 tsk->thread.trap_no = 8;
696
697 /* This is always a kernel trap and never fixable (and thus must
698 never return). */
699 for (;;)
700 die(str, regs, error_code);
701}
702
703asmlinkage void __kprobes
704do_general_protection(struct pt_regs *regs, long error_code)
705{
706 struct task_struct *tsk;
707
708 conditional_sti(regs);
709
710 tsk = current;
711 if (!user_mode(regs))
712 goto gp_in_kernel;
713
714 tsk->thread.error_code = error_code;
715 tsk->thread.trap_no = 13;
716
717 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
718 printk_ratelimit()) {
719 printk(KERN_INFO
720 "%s[%d] general protection ip:%lx sp:%lx error:%lx",
721 tsk->comm, tsk->pid,
722 regs->ip, regs->sp, error_code);
723 print_vma_addr(" in ", regs->ip);
724 printk("\n");
725 }
726
727 force_sig(SIGSEGV, tsk);
728 return;
729
730gp_in_kernel:
731 if (fixup_exception(regs))
732 return;
733
734 tsk->thread.error_code = error_code;
735 tsk->thread.trap_no = 13;
736 if (notify_die(DIE_GPF, "general protection fault", regs,
737 error_code, 13, SIGSEGV) == NOTIFY_STOP)
738 return;
739 die("general protection fault", regs, error_code);
740}
741
742static notrace __kprobes void
743mem_parity_error(unsigned char reason, struct pt_regs *regs)
744{
745 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
746 reason);
747 printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
748
749#if defined(CONFIG_EDAC)
750 if (edac_handler_set()) {
751 edac_atomic_assert_error();
752 return;
753 }
754#endif
755
756 if (panic_on_unrecovered_nmi)
757 panic("NMI: Not continuing");
758
759 printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
760
761 /* Clear and disable the memory parity error line. */
762 reason = (reason & 0xf) | 4;
763 outb(reason, 0x61);
764}
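/*
 * Editor's background note (standard PC/AT semantics of I/O port 0x61,
 * not stated in the original source): bit 7 (0x80) of the NMI status
 * register reports a memory/SERR# parity error and bit 6 (0x40) an I/O
 * channel check, which is how default_do_nmi() below dispatches on
 * "reason".  Writing the register with bit 2 (0x04) set, as
 * "(reason & 0xf) | 4" does above, clears and disables the parity
 * error line; io_check_error() uses bit 3 (0x08) the same way for the
 * IOCHK line.
 */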
765
766static notrace __kprobes void
767io_check_error(unsigned char reason, struct pt_regs *regs)
768{
769 printk("NMI: IOCK error (debug interrupt?)\n");
770 show_registers(regs);
771
772 /* Re-enable the IOCK line, wait for a few seconds */
773 reason = (reason & 0xf) | 8;
774 outb(reason, 0x61);
775 mdelay(2000);
776 reason &= ~8;
777 outb(reason, 0x61);
778}
779
780static notrace __kprobes void
781unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
782{
783 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
784 return;
785 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
786 reason);
787 printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
788
789 if (panic_on_unrecovered_nmi)
790 panic("NMI: Not continuing");
791
792 printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
793}
794
795/* Runs on IST stack. This code must keep interrupts off all the time.
796 Nested NMIs are prevented by the CPU. */
797asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
798{
799 unsigned char reason = 0;
800 int cpu;
801
802 cpu = smp_processor_id();
803
804 /* Only the BSP gets external NMIs from the system. */
805 if (!cpu)
806 reason = get_nmi_reason();
807
808 if (!(reason & 0xc0)) {
809 if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
810 == NOTIFY_STOP)
811 return;
812 /*
813 * Ok, so this is none of the documented NMI sources,
814 * so it must be the NMI watchdog.
815 */
816 if (nmi_watchdog_tick(regs, reason))
817 return;
818 if (!do_nmi_callback(regs, cpu))
819 unknown_nmi_error(reason, regs);
820
821 return;
822 }
823 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
824 return;
825
826 /* AK: following checks seem to be broken on modern chipsets. FIXME */
827 if (reason & 0x80)
828 mem_parity_error(reason, regs);
829 if (reason & 0x40)
830 io_check_error(reason, regs);
831}
832
833asmlinkage notrace __kprobes void
834do_nmi(struct pt_regs *regs, long error_code)
835{
836 nmi_enter();
837
838 add_pda(__nmi_count, 1);
839
840 if (!ignore_nmis)
841 default_do_nmi(regs);
842
843 nmi_exit();
844}
845
846void stop_nmi(void)
847{
848 acpi_nmi_disable();
849 ignore_nmis++;
850}
851
852void restart_nmi(void)
853{
854 ignore_nmis--;
855 acpi_nmi_enable();
856}
857
858/* runs on IST stack. */
859asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
860{
861 trace_hardirqs_fixup();
862
863 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
864 == NOTIFY_STOP)
865 return;
866
867 preempt_conditional_sti(regs);
868 do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
869 preempt_conditional_cli(regs);
870}
871
872/* Help handler running on IST stack to switch back to user stack
873 for scheduling or signal handling. The actual stack switch is done in
874 entry.S */
875asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
876{
877 struct pt_regs *regs = eregs;
878 /* Did we already sync? */
879 if (eregs == (struct pt_regs *)eregs->sp)
880 ;
881 /* Exception from user space */
882 else if (user_mode(eregs))
883 regs = task_pt_regs(current);
884 /* Exception from kernel and interrupts are enabled. Move to
885 kernel process stack. */
886 else if (eregs->flags & X86_EFLAGS_IF)
887 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
888 if (eregs != regs)
889 *regs = *eregs;
890 return regs;
891}
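/*
 * Editor's note on the three cases above: either the exception frame
 * already sits on the process stack (nothing to copy), or we came from
 * user space (use the task's canonical pt_regs slot), or we interrupted
 * the kernel with IRQs enabled (carve a fresh pt_regs copy just below
 * the interrupted kernel stack pointer).
 */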
892
893/* runs on IST stack. */
894asmlinkage void __kprobes do_debug(struct pt_regs * regs,
895 unsigned long error_code)
896{
897 struct task_struct *tsk = current;
898 unsigned long condition;
899 siginfo_t info;
900
901 trace_hardirqs_fixup();
902
903 get_debugreg(condition, 6);
904
905 /*
906 * The processor cleared BTF, so don't mark that we need it set.
907 */
908 clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
909 tsk->thread.debugctlmsr = 0;
910
911 if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
912 SIGTRAP) == NOTIFY_STOP)
913 return;
914
915 preempt_conditional_sti(regs);
916
917 /* Mask out spurious debug traps due to lazy DR7 setting */
918 if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
919 if (!tsk->thread.debugreg7)
920 goto clear_dr7;
921 }
922
923 tsk->thread.debugreg6 = condition;
924
925 /*
926 * Single-stepping through TF: make sure we ignore any events in
927 * kernel space (but re-enable TF when returning to user mode).
928 */
929 if (condition & DR_STEP) {
930 if (!user_mode(regs))
931 goto clear_TF_reenable;
932 }
933
934 /* Ok, finally something we can handle */
935 tsk->thread.trap_no = 1;
936 tsk->thread.error_code = error_code;
937 info.si_signo = SIGTRAP;
938 info.si_errno = 0;
939 info.si_code = TRAP_BRKPT;
940 info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
941 force_sig_info(SIGTRAP, &info, tsk);
942
943clear_dr7:
944 set_debugreg(0, 7);
945 preempt_conditional_cli(regs);
946 return;
947
948clear_TF_reenable:
949 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
950 regs->flags &= ~X86_EFLAGS_TF;
951 preempt_conditional_cli(regs);
952 return;
953}
954
955static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
956{
957 if (fixup_exception(regs))
958 return 1;
959
960 notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
961 /* Illegal floating point operation in the kernel */
962 current->thread.trap_no = trapnr;
963 die(str, regs, 0);
964 return 0;
965}
966
967/*
968 * Note that we play around with the 'TS' bit in an attempt to get
969 * the correct behaviour even in the presence of the asynchronous
970 * IRQ13 delivery.
971 */
972asmlinkage void do_coprocessor_error(struct pt_regs *regs)
973{
974 void __user *ip = (void __user *)(regs->ip);
975 struct task_struct *task;
976 siginfo_t info;
977 unsigned short cwd, swd;
978
979 conditional_sti(regs);
980 if (!user_mode(regs) &&
981 kernel_math_error(regs, "kernel x87 math error", 16))
982 return;
983
984 /*
985 * Save the info for the exception handler and clear the error.
986 */
987 task = current;
988 save_init_fpu(task);
989 task->thread.trap_no = 16;
990 task->thread.error_code = 0;
991 info.si_signo = SIGFPE;
992 info.si_errno = 0;
993 info.si_code = __SI_FAULT;
994 info.si_addr = ip;
995 /*
996 * (~cwd & swd) masks out exceptions that are not set to unmasked
997 * status. 0x3f covers the exception bits in these registers, 0x200
998 * is the C1 bit you need in case of a stack fault, and 0x040 is the
999 * stack fault bit. We should only be taking one exception at a time,
1000 * so if this combination doesn't produce any single exception, then
1001 * we have a bad program that isn't synchronizing its FPU usage, and
1002 * it will suffer the consequences since we won't be able to fully
1003 * reproduce the context of the exception.
1004 */
1005 cwd = get_fpu_cwd(task);
1006 swd = get_fpu_swd(task);
1007 switch (swd & ~cwd & 0x3f) {
1008 case 0x000: /* No unmasked exception */
1009 default: /* Multiple exceptions */
1010 break;
1011 case 0x001: /* Invalid Op */
1012 /*
1013 * swd & 0x240 == 0x040: Stack Underflow
1014 * swd & 0x240 == 0x240: Stack Overflow
1015 * User must clear the SF bit (0x40) if set
1016 */
1017 info.si_code = FPE_FLTINV;
1018 break;
1019 case 0x002: /* Denormalize */
1020 case 0x010: /* Underflow */
1021 info.si_code = FPE_FLTUND;
1022 break;
1023 case 0x004: /* Zero Divide */
1024 info.si_code = FPE_FLTDIV;
1025 break;
1026 case 0x008: /* Overflow */
1027 info.si_code = FPE_FLTOVF;
1028 break;
1029 case 0x020: /* Precision */
1030 info.si_code = FPE_FLTRES;
1031 break;
1032 }
1033 force_sig_info(SIGFPE, &info, task);
1034}
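/*
 * Worked example (editor's sketch): with the x87 default control word
 * 0x037f every exception is masked, so "swd & ~cwd & 0x3f" is always 0
 * and the switch falls into the "no unmasked exception" case.  If a
 * program clears the zero-divide mask bit (0x004) in cwd and then
 * divides by zero, the status word gets 0x004 set, ~cwd preserves it,
 * and the switch selects FPE_FLTDIV.
 */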
1035
1036asmlinkage void bad_intr(void)
1037{
1038 printk(KERN_ERR "bad interrupt\n");
1039}
1040
1041asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1042{
1043 void __user *ip = (void __user *)(regs->ip);
1044 struct task_struct *task;
1045 siginfo_t info;
1046 unsigned short mxcsr;
1047
1048 conditional_sti(regs);
1049 if (!user_mode(regs) &&
1050 kernel_math_error(regs, "kernel simd math error", 19))
1051 return;
1052
1053 /*
1054 * Save the info for the exception handler and clear the error.
1055 */
1056 task = current;
1057 save_init_fpu(task);
1058 task->thread.trap_no = 19;
1059 task->thread.error_code = 0;
1060 info.si_signo = SIGFPE;
1061 info.si_errno = 0;
1062 info.si_code = __SI_FAULT;
1063 info.si_addr = ip;
1064 /*
1065 * The SIMD FPU exceptions are handled a little differently, as there
1066 * is only a single status/control register. Thus, to determine which
1067 * unmasked exception was caught we must mask the exception mask bits
1068 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
1069 */
1070 mxcsr = get_fpu_mxcsr(task);
1071 switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
1072 case 0x000:
1073 default:
1074 break;
1075 case 0x001: /* Invalid Op */
1076 info.si_code = FPE_FLTINV;
1077 break;
1078 case 0x002: /* Denormalize */
1079 case 0x010: /* Underflow */
1080 info.si_code = FPE_FLTUND;
1081 break;
1082 case 0x004: /* Zero Divide */
1083 info.si_code = FPE_FLTDIV;
1084 break;
1085 case 0x008: /* Overflow */
1086 info.si_code = FPE_FLTOVF;
1087 break;
1088 case 0x020: /* Precision */
1089 info.si_code = FPE_FLTRES;
1090 break;
1091 }
1092 force_sig_info(SIGFPE, &info, task);
1093}
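/*
 * Worked example (editor's sketch): MXCSR keeps the exception flags in
 * bits 0-5 (0x3f) and the matching mask bits in bits 7-12 (0x1f80).
 * With the power-on default MXCSR of 0x1f80 everything is masked and
 * the expression above yields 0.  Clear the divide-by-zero mask
 * (bit 9, 0x200) and take a zero divide, and flag bit 0x004 survives
 * the masking, mapping to FPE_FLTDIV just as in the x87 case.
 */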
1094
1095asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
1096{
1097}
1098
1099asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
1100{
1101}
1102
1103asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
1104{
1105}
1106
1107/*
1108 * 'math_state_restore()' saves the current math information in the
1109 * old math state array, and gets the new state from the current task.
1110 *
1111 * Careful: there are problems with IBM-designed IRQ13 behaviour.
1112 * Don't touch unless you *really* know how it works.
1113 */
1114asmlinkage void math_state_restore(void)
1115{
1116 struct task_struct *me = current;
1117
1118 if (!used_math()) {
1119 local_irq_enable();
1120 /*
1121 * does a slab alloc which can sleep
1122 */
1123 if (init_fpu(me)) {
1124 /*
1125 * ran out of memory!
1126 */
1127 do_group_exit(SIGKILL);
1128 return;
1129 }
1130 local_irq_disable();
1131 }
1132
1133 clts(); /* Allow maths ops (or we recurse) */
1134 /*
1135 * Paranoid restore: send a SIGSEGV if we fail to restore the state.
1136 */
1137 if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) {
1138 stts();
1139 force_sig(SIGSEGV, me);
1140 return;
1141 }
1142 task_thread_info(me)->status |= TS_USEDFPU;
1143 me->fpu_counter++;
1144}
1145EXPORT_SYMBOL_GPL(math_state_restore);
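/*
 * Editor's aside (general x86 behaviour, not from the original text):
 * clts() clears CR0.TS, the bit whose being set made the faulting FP
 * instruction raise device-not-available and brought us here; stts()
 * in the failure path sets it again so the next FP instruction
 * re-faults instead of running on a corrupt context.
 */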
1146
1147void __init trap_init(void)
1148{
1149 set_intr_gate(0, &divide_error);
1150 set_intr_gate_ist(1, &debug, DEBUG_STACK);
1151 set_intr_gate_ist(2, &nmi, NMI_STACK);
1152 set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
1153 set_system_gate(4, &overflow); /* int4 can be called from all */
1154 set_intr_gate(5, &bounds);
1155 set_intr_gate(6, &invalid_op);
1156 set_intr_gate(7, &device_not_available);
1157 set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
1158 set_intr_gate(9, &coprocessor_segment_overrun);
1159 set_intr_gate(10, &invalid_TSS);
1160 set_intr_gate(11, &segment_not_present);
1161 set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
1162 set_intr_gate(13, &general_protection);
1163 set_intr_gate(14, &page_fault);
1164 set_intr_gate(15, &spurious_interrupt_bug);
1165 set_intr_gate(16, &coprocessor_error);
1166 set_intr_gate(17, &alignment_check);
1167#ifdef CONFIG_X86_MCE
1168 set_intr_gate_ist(18, &machine_check, MCE_STACK);
1169#endif
1170 set_intr_gate(19, &simd_coprocessor_error);
1171
1172#ifdef CONFIG_IA32_EMULATION
1173 set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
1174#endif
1175 /*
1176 * initialize the per thread extended state:
1177 */
1178 init_thread_xstate();
1179 /*
1180 * Should be a barrier for any external CPU state:
1181 */
1182 cpu_init();
1183}
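/*
 * Editor's note: the _ist variants above give a vector its own
 * Interrupt Stack Table stack (DEBUG_STACK, NMI_STACK, ...), so those
 * handlers always run on a known-good stack.  The set_system_gate*
 * variants differ from set_intr_gate* only in permitting the gate to
 * be invoked from CPL 3, which is why int3 and the overflow vector are
 * annotated "can be called from all".
 */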
1184
1185static int __init oops_setup(char *s)
1186{
1187 if (!s)
1188 return -EINVAL;
1189 if (!strcmp(s, "panic"))
1190 panic_on_oops = 1;
1191 return 0;
1192}
1193early_param("oops", oops_setup);
1194
1195static int __init kstack_setup(char *s)
1196{
1197 if (!s)
1198 return -EINVAL;
1199 kstack_depth_to_print = simple_strtoul(s, NULL, 0);
1200 return 0;
1201}
1202early_param("kstack", kstack_setup);
1203
1204static int __init code_bytes_setup(char *s)
1205{
1206 code_bytes = simple_strtoul(s, NULL, 0);
1207 if (code_bytes > 8192)
1208 code_bytes = 8192;
1209
1210 return 1;
1211}
1212__setup("code_bytes=", code_bytes_setup);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 8f98e9de1b82..161bb850fc47 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,7 +104,7 @@ __setup("notsc", notsc_setup);
104/* 104/*
105 * Read TSC and the reference counters. Take care of SMI disturbance 105 * Read TSC and the reference counters. Take care of SMI disturbance
106 */ 106 */
107static u64 tsc_read_refs(u64 *pm, u64 *hpet) 107static u64 tsc_read_refs(u64 *p, int hpet)
108{ 108{
109 u64 t1, t2; 109 u64 t1, t2;
110 int i; 110 int i;
@@ -112,9 +112,9 @@ static u64 tsc_read_refs(u64 *pm, u64 *hpet)
112 for (i = 0; i < MAX_RETRIES; i++) { 112 for (i = 0; i < MAX_RETRIES; i++) {
113 t1 = get_cycles(); 113 t1 = get_cycles();
114 if (hpet) 114 if (hpet)
115 *hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; 115 *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
116 else 116 else
117 *pm = acpi_pm_read_early(); 117 *p = acpi_pm_read_early();
118 t2 = get_cycles(); 118 t2 = get_cycles();
119 if ((t2 - t1) < SMI_TRESHOLD) 119 if ((t2 - t1) < SMI_TRESHOLD)
120 return t2; 120 return t2;
@@ -123,13 +123,59 @@ static u64 tsc_read_refs(u64 *pm, u64 *hpet)
123} 123}
124 124
125/* 125/*
126 * Calculate the TSC frequency from HPET reference
127 */
128static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
129{
130 u64 tmp;
131
132 if (hpet2 < hpet1)
133 hpet2 += 0x100000000ULL;
134 hpet2 -= hpet1;
135 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
136 do_div(tmp, 1000000);
137 do_div(deltatsc, tmp);
138
139 return (unsigned long) deltatsc;
140}
141
142/*
143 * Calculate the TSC frequency from PMTimer reference
144 */
145static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
146{
147 u64 tmp;
148
149 if (!pm1 && !pm2)
150 return ULONG_MAX;
151
152 if (pm2 < pm1)
153 pm2 += (u64)ACPI_PM_OVRRUN;
154 pm2 -= pm1;
155 tmp = pm2 * 1000000000LL;
156 do_div(tmp, PMTMR_TICKS_PER_SEC);
157 do_div(deltatsc, tmp);
158
159 return (unsigned long) deltatsc;
160}
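/*
 * Editor's unit check for both helpers: calc_hpet_ref() converts the
 * HPET tick delta into elapsed nanoseconds (ticks times the
 * femtosecond HPET_PERIOD, divided by 1e6), and calc_pmtimer_ref()
 * does the same for the ACPI PM timer (ticks * 1e9 /
 * PMTMR_TICKS_PER_SEC, the timer running at about 3.58 MHz).  Since
 * the caller passes deltatsc as TSC cycles times 1e6, dividing by
 * elapsed nanoseconds yields the TSC frequency directly in kHz.
 */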
161
162#define CAL_MS 10
163#define CAL_LATCH (CLOCK_TICK_RATE / (1000 / CAL_MS))
164#define CAL_PIT_LOOPS 1000
165
166#define CAL2_MS 50
167#define CAL2_LATCH (CLOCK_TICK_RATE / (1000 / CAL2_MS))
168#define CAL2_PIT_LOOPS 5000
169
170
171/*
126 * Try to calibrate the TSC against the Programmable 172 * Try to calibrate the TSC against the Programmable
127 * Interrupt Timer and return the frequency of the TSC 173 * Interrupt Timer and return the frequency of the TSC
128 * in kHz. 174 * in kHz.
129 * 175 *
130 * Return ULONG_MAX on failure to calibrate. 176 * Return ULONG_MAX on failure to calibrate.
131 */ 177 */
132static unsigned long pit_calibrate_tsc(void) 178static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
133{ 179{
134 u64 tsc, t1, t2, delta; 180 u64 tsc, t1, t2, delta;
135 unsigned long tscmin, tscmax; 181 unsigned long tscmin, tscmax;
@@ -144,8 +190,8 @@ static unsigned long pit_calibrate_tsc(void)
144 * (LSB then MSB) to begin countdown. 190 * (LSB then MSB) to begin countdown.
145 */ 191 */
146 outb(0xb0, 0x43); 192 outb(0xb0, 0x43);
147 outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); 193 outb(latch & 0xff, 0x42);
148 outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); 194 outb(latch >> 8, 0x42);
149 195
150 tsc = t1 = t2 = get_cycles(); 196 tsc = t1 = t2 = get_cycles();
151 197
@@ -166,31 +212,154 @@ static unsigned long pit_calibrate_tsc(void)
166 /* 212 /*
167 * Sanity checks: 213 * Sanity checks:
168 * 214 *
169 * If we were not able to read the PIT more than 5000 215 * If we were not able to read the PIT more than loopmin
170 * times, then we have been hit by a massive SMI 216 * times, then we have been hit by a massive SMI
171 * 217 *
172 * If the maximum is 10 times larger than the minimum, 218 * If the maximum is 10 times larger than the minimum,
173 * then we got hit by an SMI as well. 219 * then we got hit by an SMI as well.
174 */ 220 */
175 if (pitcnt < 5000 || tscmax > 10 * tscmin) 221 if (pitcnt < loopmin || tscmax > 10 * tscmin)
176 return ULONG_MAX; 222 return ULONG_MAX;
177 223
178 /* Calculate the PIT value */ 224 /* Calculate the PIT value */
179 delta = t2 - t1; 225 delta = t2 - t1;
180 do_div(delta, 50); 226 do_div(delta, ms);
181 return delta; 227 return delta;
182} 228}
183 229
230/*
231 * This reads the current MSB of the PIT counter, and
232 * checks if we are running on sufficiently fast and
233 * non-virtualized hardware.
234 *
235 * Our expectations are:
236 *
237 * - the PIT is running at roughly 1.19MHz
238 *
239 * - each IO is going to take about 1us on real hardware,
240 * but we allow it to be much faster (by a factor of 10) or
241 * _slightly_ slower (ie we allow up to a 2us read+counter
242 * update) - anything else implies an unacceptably slow CPU
243 * or PIT for the fast calibration to work.
244 *
245 * - with 256 PIT ticks to read the value, we have 214us to
246 * see the same MSB (and overhead like doing a single TSC
247 * read per MSB value etc).
248 *
249 * - We're doing 2 reads per loop (LSB, MSB), and we expect
250 * them each to take about a microsecond on real hardware.
251 * So we expect a count value of around 100. But we'll be
252 * generous, and accept anything over 50.
253 *
254 * - if the PIT is stuck, and we see *many* more reads, we
255 * return early (and the next caller of pit_expect_msb()
256 * will then consider it a failure when it doesn't see the
257 * next expected value).
258 *
259 * These expectations mean that we know that we have seen the
260 * transition from one expected value to another with a fairly
261 * high accuracy, and we didn't miss any events. We can thus
262 * use the TSC value at the transitions to calculate a pretty
263 * good value for the TSC frequency.
264 */
265static inline int pit_expect_msb(unsigned char val)
266{
267 int count = 0;
268
269 for (count = 0; count < 50000; count++) {
270 /* Ignore LSB */
271 inb(0x42);
272 if (inb(0x42) != val)
273 break;
274 }
275 return count > 50;
276}
277
278/*
279 * How many MSB values do we want to see? We aim for a
280 * 15ms calibration, which assuming a 2us counter read
281 * error should give us roughly 150 ppm precision for
282 * the calibration.
283 */
284#define QUICK_PIT_MS 15
285#define QUICK_PIT_ITERATIONS (QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
286
287static unsigned long quick_pit_calibrate(void)
288{
289 /* Set the Gate high, disable speaker */
290 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
291
292 /*
293 * Counter 2, mode 0 (one-shot), binary count
294 *
295 * NOTE! Mode 2 decrements by two (and then the
296 * output is flipped each time, giving the same
297 * final output frequency as a decrement-by-one),
298 * so mode 0 is much better when looking at the
299 * individual counts.
300 */
301 outb(0xb0, 0x43);
302
303 /* Start at 0xffff */
304 outb(0xff, 0x42);
305 outb(0xff, 0x42);
306
307 if (pit_expect_msb(0xff)) {
308 int i;
309 u64 t1, t2, delta;
310 unsigned char expect = 0xfe;
311
312 t1 = get_cycles();
313 for (i = 0; i < QUICK_PIT_ITERATIONS; i++, expect--) {
314 if (!pit_expect_msb(expect))
315 goto failed;
316 }
317 t2 = get_cycles();
318
319 /*
320 * Make sure we can rely on the second TSC timestamp:
321 */
322 if (!pit_expect_msb(expect))
323 goto failed;
324
325 /*
326 * Ok, if we get here, then we've seen the
327 * MSB of the PIT decrement QUICK_PIT_ITERATIONS
328 * times, and each MSB had many hits, so we never
329 * had any sudden jumps.
330 *
331 * As a result, we can depend on there not being
332 * any odd delays anywhere, and the TSC reads are
333 * reliable.
334 *
335 * kHz = ticks / time-in-seconds / 1000;
336 * kHz = (t2 - t1) / (QPI * 256 / PIT_TICK_RATE) / 1000
337 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (QPI * 256 * 1000)
338 */
339 delta = (t2 - t1)*PIT_TICK_RATE;
340 do_div(delta, QUICK_PIT_ITERATIONS*256*1000);
341 printk("Fast TSC calibration using PIT\n");
342 return delta;
343 }
344failed:
345 return 0;
346}
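/*
 * Worked numbers (editor's sketch, assuming the standard PIT_TICK_RATE
 * of 1193182 Hz): QUICK_PIT_ITERATIONS evaluates to
 * 15 * 1193182 / 1000 / 256 = 69, i.e. 69 * 256 / 1193182 ~= 14.8 ms
 * of wall time.  A 2 GHz TSC accumulates about 29.6 million cycles in
 * that window, and 29.6e6 * 1193182 / (69 * 256 * 1000) ~= 2000000,
 * the expected frequency in kHz.
 */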
184 347
185/** 348/**
186 * native_calibrate_tsc - calibrate the tsc on boot 349 * native_calibrate_tsc - calibrate the tsc on boot
187 */ 350 */
188unsigned long native_calibrate_tsc(void) 351unsigned long native_calibrate_tsc(void)
189{ 352{
190 u64 tsc1, tsc2, delta, pm1, pm2, hpet1, hpet2; 353 u64 tsc1, tsc2, delta, ref1, ref2;
191 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; 354 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
192 unsigned long flags; 355 unsigned long flags, latch, ms, fast_calibrate;
193 int hpet = is_hpet_enabled(), i; 356 int hpet = is_hpet_enabled(), i, loopmin;
357
358 local_irq_save(flags);
359 fast_calibrate = quick_pit_calibrate();
360 local_irq_restore(flags);
361 if (fast_calibrate)
362 return fast_calibrate;
194 363
195 /* 364 /*
196 * Run 5 calibration loops to get the lowest frequency value 365 * Run 5 calibration loops to get the lowest frequency value
@@ -216,7 +385,13 @@ unsigned long native_calibrate_tsc(void)
216 * calibration delay loop as we have to wait for a certain 385 * calibration delay loop as we have to wait for a certain
217 * amount of time anyway. 386 * amount of time anyway.
218 */ 387 */
219 for (i = 0; i < 5; i++) { 388
389 /* Preset PIT loop values */
390 latch = CAL_LATCH;
391 ms = CAL_MS;
392 loopmin = CAL_PIT_LOOPS;
393
394 for (i = 0; i < 3; i++) {
220 unsigned long tsc_pit_khz; 395 unsigned long tsc_pit_khz;
221 396
222 /* 397 /*
@@ -226,16 +401,16 @@ unsigned long native_calibrate_tsc(void)
226 * read the end value. 401 * read the end value.
227 */ 402 */
228 local_irq_save(flags); 403 local_irq_save(flags);
229 tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL); 404 tsc1 = tsc_read_refs(&ref1, hpet);
230 tsc_pit_khz = pit_calibrate_tsc(); 405 tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
231 tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); 406 tsc2 = tsc_read_refs(&ref2, hpet);
232 local_irq_restore(flags); 407 local_irq_restore(flags);
233 408
234 /* Pick the lowest PIT TSC calibration so far */ 409 /* Pick the lowest PIT TSC calibration so far */
235 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz); 410 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
236 411
237 /* hpet or pmtimer available ? */ 412 /* hpet or pmtimer available ? */
238 if (!hpet && !pm1 && !pm2) 413 if (!hpet && !ref1 && !ref2)
239 continue; 414 continue;
240 415
241 /* Check whether the sampling was disturbed by an SMI */ 416 /* Check whether the sampling was disturbed by an SMI */
@@ -243,23 +418,41 @@ unsigned long native_calibrate_tsc(void)
243 continue; 418 continue;
244 419
245 tsc2 = (tsc2 - tsc1) * 1000000LL; 420 tsc2 = (tsc2 - tsc1) * 1000000LL;
421 if (hpet)
422 tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
423 else
424 tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
246 425
247 if (hpet) { 426 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
248 if (hpet2 < hpet1) 427
249 hpet2 += 0x100000000ULL; 428 /* Check the reference deviation */
250 hpet2 -= hpet1; 429 delta = ((u64) tsc_pit_min) * 100;
251 tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); 430 do_div(delta, tsc_ref_min);
252 do_div(tsc1, 1000000); 431
253 } else { 432 /*
254 if (pm2 < pm1) 433 * If both calibration results are inside a 10% window
255 pm2 += (u64)ACPI_PM_OVRRUN; 434 * then we can be sure that the calibration
256 pm2 -= pm1; 435 * succeeded. We break out of the loop right away. We
257 tsc1 = pm2 * 1000000000LL; 436 * use the reference value, as it is more precise.
258 do_div(tsc1, PMTMR_TICKS_PER_SEC); 437 */
438 if (delta >= 90 && delta <= 110) {
439 printk(KERN_INFO
440 "TSC: PIT calibration matches %s. %d loops\n",
441 hpet ? "HPET" : "PMTIMER", i + 1);
442 return tsc_ref_min;
259 } 443 }
260 444
261 do_div(tsc2, tsc1); 445 /*
262 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2); 446 * Check whether PIT failed more than once. This
447 * happens in virtualized environments. We need to
448 * give the virtual PC a slightly longer timeframe for
449 * the HPET/PMTIMER to make the result precise.
450 */
451 if (i == 1 && tsc_pit_min == ULONG_MAX) {
452 latch = CAL2_LATCH;
453 ms = CAL2_MS;
454 loopmin = CAL2_PIT_LOOPS;
455 }
263 } 456 }
264 457
265 /* 458 /*
@@ -270,7 +463,7 @@ unsigned long native_calibrate_tsc(void)
270 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n"); 463 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
271 464
272 /* We don't have an alternative source, disable TSC */ 465 /* We don't have an alternative source, disable TSC */
273 if (!hpet && !pm1 && !pm2) { 466 if (!hpet && !ref1 && !ref2) {
274 printk("TSC: No reference (HPET/PMTIMER) available\n"); 467 printk("TSC: No reference (HPET/PMTIMER) available\n");
275 return 0; 468 return 0;
276 } 469 }
@@ -278,7 +471,7 @@ unsigned long native_calibrate_tsc(void)
278 /* The alternative source failed as well, disable TSC */ 471 /* The alternative source failed as well, disable TSC */
279 if (tsc_ref_min == ULONG_MAX) { 472 if (tsc_ref_min == ULONG_MAX) {
280 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration " 473 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
281 "failed due to SMI disturbance.\n"); 474 "failed.\n");
282 return 0; 475 return 0;
283 } 476 }
284 477
@@ -290,44 +483,25 @@ unsigned long native_calibrate_tsc(void)
290 } 483 }
291 484
292 /* We don't have an alternative source, use the PIT calibration value */ 485 /* We don't have an alternative source, use the PIT calibration value */
293 if (!hpet && !pm1 && !pm2) { 486 if (!hpet && !ref1 && !ref2) {
294 printk(KERN_INFO "TSC: Using PIT calibration value\n"); 487 printk(KERN_INFO "TSC: Using PIT calibration value\n");
295 return tsc_pit_min; 488 return tsc_pit_min;
296 } 489 }
297 490
298 /* The alternative source failed, use the PIT calibration value */ 491 /* The alternative source failed, use the PIT calibration value */
299 if (tsc_ref_min == ULONG_MAX) { 492 if (tsc_ref_min == ULONG_MAX) {
300 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed due " 493 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. "
301 "to SMI disturbance. Using PIT calibration\n"); 494 "Using PIT calibration\n");
302 return tsc_pit_min; 495 return tsc_pit_min;
303 } 496 }
304 497
305 /* Check the reference deviation */
306 delta = ((u64) tsc_pit_min) * 100;
307 do_div(delta, tsc_ref_min);
308
309 /*
310 * If both calibration results are inside a 5% window, then we
311 * use the lower frequency of those as it is probably the
312 * closest estimate.
313 */
314 if (delta >= 95 && delta <= 105) {
315 printk(KERN_INFO "TSC: PIT calibration confirmed by %s.\n",
316 hpet ? "HPET" : "PMTIMER");
317 printk(KERN_INFO "TSC: using %s calibration value\n",
318 tsc_pit_min <= tsc_ref_min ? "PIT" :
319 hpet ? "HPET" : "PMTIMER");
320 return tsc_pit_min <= tsc_ref_min ? tsc_pit_min : tsc_ref_min;
321 }
322
323 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
324 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
325
326 /* 498 /*
327 * The calibration values differ too much. In doubt, we use 499 * The calibration values differ too much. In doubt, we use
328 * the PIT value as we know that there are PMTIMERs around 500 * the PIT value as we know that there are PMTIMERs around
329 * running at double speed. 501 * running at double speed. At least we let the user know:
330 */ 502 */
503 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
504 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
331 printk(KERN_INFO "TSC: Using PIT calibration value\n"); 505 printk(KERN_INFO "TSC: Using PIT calibration value\n");
332 return tsc_pit_min; 506 return tsc_pit_min;
333} 507}
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
new file mode 100644
index 000000000000..aeef529917e4
--- /dev/null
+++ b/arch/x86/kernel/uv_irq.c
@@ -0,0 +1,79 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * SGI UV IRQ functions
7 *
8 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
9 */
10
11#include <linux/module.h>
12#include <linux/irq.h>
13
14#include <asm/apic.h>
15#include <asm/uv/uv_irq.h>
16
17static void uv_noop(unsigned int irq)
18{
19}
20
21static unsigned int uv_noop_ret(unsigned int irq)
22{
23 return 0;
24}
25
26static void uv_ack_apic(unsigned int irq)
27{
28 ack_APIC_irq();
29}
30
31struct irq_chip uv_irq_chip = {
32 .name = "UV-CORE",
33 .startup = uv_noop_ret,
34 .shutdown = uv_noop,
35 .enable = uv_noop,
36 .disable = uv_noop,
37 .ack = uv_noop,
38 .mask = uv_noop,
39 .unmask = uv_noop,
40 .eoi = uv_ack_apic,
41 .end = uv_noop,
42};
43
44/*
45 * Set up a mapping of an available irq and vector, and enable the specified
46 * MMR that defines the MSI that is to be sent to the specified CPU when an
47 * interrupt is raised.
48 */
49int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
50 unsigned long mmr_offset)
51{
52 int irq;
53 int ret;
54
55 irq = create_irq();
56 if (irq <= 0)
57 return -EBUSY;
58
59 ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset);
60 if (ret != irq)
61 destroy_irq(irq);
62
63 return ret;
64}
65EXPORT_SYMBOL_GPL(uv_setup_irq);
66
67/*
68 * Tear down a mapping of an irq and vector, and disable the specified MMR that
69 * defined the MSI that was to be sent to the specified CPU when an interrupt
70 * was raised.
71 *
72 * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
73 */
74void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset)
75{
76 arch_disable_uv_irq(mmr_blade, mmr_offset);
77 destroy_irq(irq);
78}
79EXPORT_SYMBOL_GPL(uv_teardown_irq);
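/*
 * Usage sketch (editor's illustration; names and values are made up):
 * a driver pairs the two entry points, passing the same blade and MMR
 * offset to the teardown, e.g.
 *
 *	irq = uv_setup_irq("uv-example", cpu, mmr_blade, mmr_offset);
 *	if (irq > 0) {
 *		...
 *		uv_teardown_irq(irq, mmr_blade, mmr_offset);
 *	}
 *
 * since uv_setup_irq() returns the allocated irq number on success and
 * a negative errno otherwise.
 */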
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
new file mode 100644
index 000000000000..67f9b9dbf800
--- /dev/null
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -0,0 +1,72 @@
1/*
2 * This file supports the /sys/firmware/sgi_uv interfaces for SGI UV.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
19 * Copyright (c) Russ Anderson
20 */
21
22#include <linux/sysdev.h>
23#include <asm/uv/bios.h>
24
25struct kobject *sgi_uv_kobj;
26
27static ssize_t partition_id_show(struct kobject *kobj,
28 struct kobj_attribute *attr, char *buf)
29{
30 return snprintf(buf, PAGE_SIZE, "%ld\n", sn_partition_id);
31}
32
33static ssize_t coherence_id_show(struct kobject *kobj,
34 struct kobj_attribute *attr, char *buf)
35{
36 return snprintf(buf, PAGE_SIZE, "%ld\n", partition_coherence_id());
37}
38
39static struct kobj_attribute partition_id_attr =
40 __ATTR(partition_id, S_IRUGO, partition_id_show, NULL);
41
42static struct kobj_attribute coherence_id_attr =
43 __ATTR(coherence_id, S_IRUGO, coherence_id_show, NULL);
44
45
46static int __init sgi_uv_sysfs_init(void)
47{
48 int ret;
49
50 if (!sgi_uv_kobj)
51 sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
52 if (!sgi_uv_kobj) {
53 printk(KERN_WARNING "kobject_create_and_add sgi_uv failed\n");
54 return -EINVAL;
55 }
56
57 ret = sysfs_create_file(sgi_uv_kobj, &partition_id_attr.attr);
58 if (ret) {
59 printk(KERN_WARNING "sysfs_create_file partition_id failed\n");
60 return ret;
61 }
62
63 ret = sysfs_create_file(sgi_uv_kobj, &coherence_id_attr.attr);
64 if (ret) {
65 printk(KERN_WARNING "sysfs_create_file coherence_id failed\n");
66 return ret;
67 }
68
69 return 0;
70}
71
72device_initcall(sgi_uv_sysfs_init);
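/*
 * Editor's note: on a UV system this initcall ends up exposing two
 * read-only files, /sys/firmware/sgi_uv/partition_id and
 * /sys/firmware/sgi_uv/coherence_id, each returning a single decimal
 * id per the show functions above.
 */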
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 594ef47f0a63..0c9667f0752a 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -25,45 +25,31 @@
25#include <asm/visws/cobalt.h> 25#include <asm/visws/cobalt.h>
26#include <asm/visws/piix4.h> 26#include <asm/visws/piix4.h>
27#include <asm/arch_hooks.h> 27#include <asm/arch_hooks.h>
28#include <asm/io_apic.h>
28#include <asm/fixmap.h> 29#include <asm/fixmap.h>
29#include <asm/reboot.h> 30#include <asm/reboot.h>
30#include <asm/setup.h> 31#include <asm/setup.h>
31#include <asm/e820.h> 32#include <asm/e820.h>
32#include <asm/smp.h>
33#include <asm/io.h> 33#include <asm/io.h>
34 34
35#include <mach_ipi.h> 35#include <mach_ipi.h>
36 36
37#include "mach_apic.h" 37#include "mach_apic.h"
38 38
39#include <linux/init.h>
40#include <linux/smp.h>
41
42#include <linux/kernel_stat.h> 39#include <linux/kernel_stat.h>
43#include <linux/interrupt.h>
44#include <linux/init.h>
45 40
46#include <asm/io.h>
47#include <asm/apic.h>
48#include <asm/i8259.h> 41#include <asm/i8259.h>
49#include <asm/irq_vectors.h> 42#include <asm/irq_vectors.h>
50#include <asm/visws/cobalt.h>
51#include <asm/visws/lithium.h> 43#include <asm/visws/lithium.h>
52#include <asm/visws/piix4.h>
53 44
54#include <linux/sched.h> 45#include <linux/sched.h>
55#include <linux/kernel.h> 46#include <linux/kernel.h>
56#include <linux/init.h>
57#include <linux/pci.h> 47#include <linux/pci.h>
58#include <linux/pci_ids.h> 48#include <linux/pci_ids.h>
59 49
60extern int no_broadcast; 50extern int no_broadcast;
61 51
62#include <asm/io.h>
63#include <asm/apic.h> 52#include <asm/apic.h>
64#include <asm/arch_hooks.h>
65#include <asm/visws/cobalt.h>
66#include <asm/visws/lithium.h>
67 53
68char visws_board_type = -1; 54char visws_board_type = -1;
69char visws_board_rev = -1; 55char visws_board_rev = -1;
@@ -498,10 +484,11 @@ static void disable_cobalt_irq(unsigned int irq)
498static unsigned int startup_cobalt_irq(unsigned int irq) 484static unsigned int startup_cobalt_irq(unsigned int irq)
499{ 485{
500 unsigned long flags; 486 unsigned long flags;
487 struct irq_desc *desc = irq_to_desc(irq);
501 488
502 spin_lock_irqsave(&cobalt_lock, flags); 489 spin_lock_irqsave(&cobalt_lock, flags);
503 if ((irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING))) 490 if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
504 irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING); 491 desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
505 enable_cobalt_irq(irq); 492 enable_cobalt_irq(irq);
506 spin_unlock_irqrestore(&cobalt_lock, flags); 493 spin_unlock_irqrestore(&cobalt_lock, flags);
507 return 0; 494 return 0;
@@ -520,9 +507,10 @@ static void ack_cobalt_irq(unsigned int irq)
520static void end_cobalt_irq(unsigned int irq) 507static void end_cobalt_irq(unsigned int irq)
521{ 508{
522 unsigned long flags; 509 unsigned long flags;
510 struct irq_desc *desc = irq_to_desc(irq);
523 511
524 spin_lock_irqsave(&cobalt_lock, flags); 512 spin_lock_irqsave(&cobalt_lock, flags);
525 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) 513 if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)))
526 enable_cobalt_irq(irq); 514 enable_cobalt_irq(irq);
527 spin_unlock_irqrestore(&cobalt_lock, flags); 515 spin_unlock_irqrestore(&cobalt_lock, flags);
528} 516}
@@ -640,12 +628,12 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
640 628
641 spin_unlock_irqrestore(&i8259A_lock, flags); 629 spin_unlock_irqrestore(&i8259A_lock, flags);
642 630
643 desc = irq_desc + realirq; 631 desc = irq_to_desc(realirq);
644 632
645 /* 633 /*
646 * handle this 'virtual interrupt' as a Cobalt one now. 634 * handle this 'virtual interrupt' as a Cobalt one now.
647 */ 635 */
648 kstat_cpu(smp_processor_id()).irqs[realirq]++; 636 kstat_incr_irqs_this_cpu(realirq, desc);
649 637
650 if (likely(desc->action != NULL)) 638 if (likely(desc->action != NULL))
651 handle_IRQ_event(realirq, desc->action); 639 handle_IRQ_event(realirq, desc->action);
@@ -676,27 +664,29 @@ void init_VISWS_APIC_irqs(void)
676 int i; 664 int i;
677 665
678 for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { 666 for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) {
679 irq_desc[i].status = IRQ_DISABLED; 667 struct irq_desc *desc = irq_to_desc(i);
680 irq_desc[i].action = 0; 668
681 irq_desc[i].depth = 1; 669 desc->status = IRQ_DISABLED;
670 desc->action = 0;
671 desc->depth = 1;
682 672
683 if (i == 0) { 673 if (i == 0) {
684 irq_desc[i].chip = &cobalt_irq_type; 674 desc->chip = &cobalt_irq_type;
685 } 675 }
686 else if (i == CO_IRQ_IDE0) { 676 else if (i == CO_IRQ_IDE0) {
687 irq_desc[i].chip = &cobalt_irq_type; 677 desc->chip = &cobalt_irq_type;
688 } 678 }
689 else if (i == CO_IRQ_IDE1) { 679 else if (i == CO_IRQ_IDE1) {
690 irq_desc[i].chip = &cobalt_irq_type; 680 desc->chip = &cobalt_irq_type;
691 } 681 }
692 else if (i == CO_IRQ_8259) { 682 else if (i == CO_IRQ_8259) {
693 irq_desc[i].chip = &piix4_master_irq_type; 683 desc->chip = &piix4_master_irq_type;
694 } 684 }
695 else if (i < CO_IRQ_APIC0) { 685 else if (i < CO_IRQ_APIC0) {
696 irq_desc[i].chip = &piix4_virtual_irq_type; 686 desc->chip = &piix4_virtual_irq_type;
697 } 687 }
698 else if (IS_CO_APIC(i)) { 688 else if (IS_CO_APIC(i)) {
699 irq_desc[i].chip = &cobalt_irq_type; 689 desc->chip = &cobalt_irq_type;
700 } 690 }
701 } 691 }
702 692
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 38f566fa27d2..4eeb5cf9720d 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -46,6 +46,7 @@
46#include <asm/io.h> 46#include <asm/io.h>
47#include <asm/tlbflush.h> 47#include <asm/tlbflush.h>
48#include <asm/irq.h> 48#include <asm/irq.h>
49#include <asm/syscalls.h>
49 50
50/* 51/*
51 * Known problems: 52 * Known problems:
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index edfb09f30479..8b6c393ab9fd 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -393,13 +393,13 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
393} 393}
394#endif 394#endif
395 395
396static void vmi_allocate_pte(struct mm_struct *mm, u32 pfn) 396static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
397{ 397{
398 vmi_set_page_type(pfn, VMI_PAGE_L1); 398 vmi_set_page_type(pfn, VMI_PAGE_L1);
399 vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); 399 vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
400} 400}
401 401
402static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn) 402static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
403{ 403{
404 /* 404 /*
405 * This call comes in very early, before mem_map is setup. 405 * This call comes in very early, before mem_map is setup.
@@ -410,20 +410,20 @@ static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn)
410 vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); 410 vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
411} 411}
412 412
413static void vmi_allocate_pmd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count) 413static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
414{ 414{
415 vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE); 415 vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
416 vmi_check_page_type(clonepfn, VMI_PAGE_L2); 416 vmi_check_page_type(clonepfn, VMI_PAGE_L2);
417 vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); 417 vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
418} 418}
419 419
420static void vmi_release_pte(u32 pfn) 420static void vmi_release_pte(unsigned long pfn)
421{ 421{
422 vmi_ops.release_page(pfn, VMI_PAGE_L1); 422 vmi_ops.release_page(pfn, VMI_PAGE_L1);
423 vmi_set_page_type(pfn, VMI_PAGE_NORMAL); 423 vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
424} 424}
425 425
426static void vmi_release_pmd(u32 pfn) 426static void vmi_release_pmd(unsigned long pfn)
427{ 427{
428 vmi_ops.release_page(pfn, VMI_PAGE_L2); 428 vmi_ops.release_page(pfn, VMI_PAGE_L2);
429 vmi_set_page_type(pfn, VMI_PAGE_NORMAL); 429 vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
@@ -905,8 +905,8 @@ static inline int __init activate_vmi(void)
905#endif 905#endif
906 906
907#ifdef CONFIG_X86_LOCAL_APIC 907#ifdef CONFIG_X86_LOCAL_APIC
908 para_fill(pv_apic_ops.apic_read, APICRead); 908 para_fill(apic_ops->read, APICRead);
909 para_fill(pv_apic_ops.apic_write, APICWrite); 909 para_fill(apic_ops->write, APICWrite);
910#endif 910#endif
911 911
912 /* 912 /*
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 6953859fe289..254ee07f8635 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -235,11 +235,14 @@ static void __devinit vmi_time_init_clockevent(void)
235 235
236void __init vmi_time_init(void) 236void __init vmi_time_init(void)
237{ 237{
238 unsigned int cpu;
238 /* Disable PIT: BIOSes start PIT CH0 with 18.2Hz periodic. */ 239 /* Disable PIT: BIOSes start PIT CH0 with 18.2Hz periodic. */
239 outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */ 240 outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */
240 241
241 vmi_time_init_clockevent(); 242 vmi_time_init_clockevent();
242 setup_irq(0, &vmi_clock_action); 243 setup_irq(0, &vmi_clock_action);
244 for_each_possible_cpu(cpu)
245 per_cpu(vector_irq, cpu)[vmi_get_timer_vector()] = 0;
243} 246}
244 247
245#ifdef CONFIG_X86_LOCAL_APIC 248#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index af5bdad84604..a9b8560adbc2 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -140,10 +140,10 @@ SECTIONS
140 *(.con_initcall.init) 140 *(.con_initcall.init)
141 __con_initcall_end = .; 141 __con_initcall_end = .;
142 } 142 }
143 .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) { 143 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
144 __x86cpuvendor_start = .; 144 __x86_cpu_dev_start = .;
145 *(.x86cpuvendor.init) 145 *(.x86_cpu_dev.init)
146 __x86cpuvendor_end = .; 146 __x86_cpu_dev_end = .;
147 } 147 }
148 SECURITY_INIT 148 SECURITY_INIT
149 . = ALIGN(4); 149 . = ALIGN(4);
@@ -180,6 +180,7 @@ SECTIONS
180 . = ALIGN(PAGE_SIZE); 180 . = ALIGN(PAGE_SIZE);
181 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { 181 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
182 __per_cpu_start = .; 182 __per_cpu_start = .;
183 *(.data.percpu.page_aligned)
183 *(.data.percpu) 184 *(.data.percpu)
184 *(.data.percpu.shared_aligned) 185 *(.data.percpu.shared_aligned)
185 __per_cpu_end = .; 186 __per_cpu_end = .;
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 63e5c1a22e88..46e05447405b 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -168,12 +168,11 @@ SECTIONS
168 *(.con_initcall.init) 168 *(.con_initcall.init)
169 } 169 }
170 __con_initcall_end = .; 170 __con_initcall_end = .;
171 . = ALIGN(16); 171 __x86_cpu_dev_start = .;
172 __x86cpuvendor_start = .; 172 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
173 .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) { 173 *(.x86_cpu_dev.init)
174 *(.x86cpuvendor.init)
175 } 174 }
176 __x86cpuvendor_end = .; 175 __x86_cpu_dev_end = .;
177 SECURITY_INIT 176 SECURITY_INIT
178 177
179 . = ALIGN(8); 178 . = ALIGN(8);
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
new file mode 100644
index 000000000000..9abac8a9d823
--- /dev/null
+++ b/arch/x86/kernel/xsave.c
@@ -0,0 +1,345 @@
1/*
2 * xsave/xrstor support.
3 *
4 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
5 */
6#include <linux/bootmem.h>
7#include <linux/compat.h>
8#include <asm/i387.h>
9#ifdef CONFIG_IA32_EMULATION
10#include <asm/sigcontext32.h>
11#endif
12#include <asm/xcr.h>
13
14/*
15 * Supported feature mask by the CPU and the kernel.
16 */
17u64 pcntxt_mask;
18
19struct _fpx_sw_bytes fx_sw_reserved;
20#ifdef CONFIG_IA32_EMULATION
21struct _fpx_sw_bytes fx_sw_reserved_ia32;
22#endif
23
24/*
25 * Check for the presence of extended state information in the
26 * user fpstate pointer in the sigcontext.
27 */
28int check_for_xstate(struct i387_fxsave_struct __user *buf,
29 void __user *fpstate,
30 struct _fpx_sw_bytes *fx_sw_user)
31{
32 int min_xstate_size = sizeof(struct i387_fxsave_struct) +
33 sizeof(struct xsave_hdr_struct);
34 unsigned int magic2;
35 int err;
36
37 err = __copy_from_user(fx_sw_user, &buf->sw_reserved[0],
38 sizeof(struct _fpx_sw_bytes));
39
40 if (err)
41 return err;
42
43 /*
44 * Bail if the first magic number check fails.
45 */
46 if (fx_sw_user->magic1 != FP_XSTATE_MAGIC1)
47 return -1;
48
49 /*
50 * Check for error scenarios.
51 */
52 if (fx_sw_user->xstate_size < min_xstate_size ||
53 fx_sw_user->xstate_size > xstate_size ||
54 fx_sw_user->xstate_size > fx_sw_user->extended_size)
55 return -1;
56
57 err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
58 fx_sw_user->extended_size -
59 FP_XSTATE_MAGIC2_SIZE));
60 /*
61 * Check for the presence of the second magic word at the end of the
62 * memory layout. This detects the case where the user just copied
63 * the legacy fpstate layout without copying the extended state
64 * information in the memory layout.
65 */
66 if (err || magic2 != FP_XSTATE_MAGIC2)
67 return -1;
68
69 return 0;
70}
71
72#ifdef CONFIG_X86_64
73/*
74 * Signal frame handlers.
75 */
76
77int save_i387_xstate(void __user *buf)
78{
79 struct task_struct *tsk = current;
80 int err = 0;
81
82 if (!access_ok(VERIFY_WRITE, buf, sig_xstate_size))
83 return -EACCES;
84
85 BUG_ON(sig_xstate_size < xstate_size);
86
87 if ((unsigned long)buf % 64)
88 printk(KERN_ERR "save_i387_xstate: bad fpstate %p\n", buf);
89
90 if (!used_math())
91 return 0;
92 clear_used_math(); /* trigger finit */
93 if (task_thread_info(tsk)->status & TS_USEDFPU) {
94 /*
95 * Start with clearing the user buffer. This will present a
96 * clean context for the bytes not touched by the fxsave/xsave.
97 */
98 err = __clear_user(buf, sig_xstate_size);
99 if (err)
100 return err;
101
102 if (task_thread_info(tsk)->status & TS_XSAVE)
103 err = xsave_user(buf);
104 else
105 err = fxsave_user(buf);
106
107 if (err)
108 return err;
109 task_thread_info(tsk)->status &= ~TS_USEDFPU;
110 stts();
111 } else {
112 if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
113 xstate_size))
114 return -1;
115 }
116
117 if (task_thread_info(tsk)->status & TS_XSAVE) {
118 struct _fpstate __user *fx = buf;
119 struct _xstate __user *x = buf;
120 u64 xstate_bv;
121
122 err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved,
123 sizeof(struct _fpx_sw_bytes));
124
125 err |= __put_user(FP_XSTATE_MAGIC2,
126 (__u32 __user *) (buf + sig_xstate_size
127 - FP_XSTATE_MAGIC2_SIZE));
128
129 /*
130 * Read the xstate_bv which we copied (directly from the cpu or
131 * from the state in task struct) to the user buffers and
132 * set the FP/SSE bits.
133 */
134 err |= __get_user(xstate_bv, &x->xstate_hdr.xstate_bv);
135
136 /*
137 * For legacy compatibility, we always set the FP/SSE bits in the
138 * bit vector while saving the state to the user context. This
139 * lets us capture any changes (during sigreturn) made to the
140 * FP/SSE bits by legacy applications which don't touch
141 * xstate_bv in the xsave header.
142 *
143 * xsave-aware apps can change xstate_bv in the xsave header as
144 * well as any contents of the memory layout. xrstor, as part of
145 * sigreturn, will capture all those changes.
146 */
147 xstate_bv |= XSTATE_FPSSE;
148
149 err |= __put_user(xstate_bv, &x->xstate_hdr.xstate_bv);
150
151 if (err)
152 return err;
153 }
154
155 return 1;
156}
157
158/*
159 * Restore the extended state if present. Otherwise, restore the FP/SSE
160 * state.
161 */
162int restore_user_xstate(void __user *buf)
163{
164 struct _fpx_sw_bytes fx_sw_user;
165 u64 mask;
166 int err;
167
168 if (((unsigned long)buf % 64) ||
169 check_for_xstate(buf, buf, &fx_sw_user))
170 goto fx_only;
171
172 mask = fx_sw_user.xstate_bv;
173
174 /*
175 * restore the state passed by the user.
176 */
177 err = xrestore_user(buf, mask);
178 if (err)
179 return err;
180
181 /*
182 * init the state skipped by the user.
183 */
184 mask = pcntxt_mask & ~mask;
185
186 xrstor_state(init_xstate_buf, mask);
187
188 return 0;
189
190fx_only:
191 /*
192 * couldn't find the extended state information in the
193 * memory layout. Restore just the FP/SSE and init all
194 * the other extended state.
195 */
196 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
197 return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
198}
199
200/*
201 * This restores directly out of user space. Exceptions are handled.
202 */
203int restore_i387_xstate(void __user *buf)
204{
205 struct task_struct *tsk = current;
206 int err = 0;
207
208 if (!buf) {
209 if (used_math())
210 goto clear;
211 return 0;
212 } else
213 if (!access_ok(VERIFY_READ, buf, sig_xstate_size))
214 return -EACCES;
215
216 if (!used_math()) {
217 err = init_fpu(tsk);
218 if (err)
219 return err;
220 }
221
222 if (!(task_thread_info(current)->status & TS_USEDFPU)) {
223 clts();
224 task_thread_info(current)->status |= TS_USEDFPU;
225 }
226 if (task_thread_info(tsk)->status & TS_XSAVE)
227 err = restore_user_xstate(buf);
228 else
229 err = fxrstor_checking((__force struct i387_fxsave_struct *)
230 buf);
231 if (unlikely(err)) {
232 /*
233 * Encountered an error while doing the restore from the
234 * user buffer, clear the fpu state.
235 */
236clear:
237 clear_fpu(tsk);
238 clear_used_math();
239 }
240 return err;
241}
242#endif
243
244/*
245 * Prepare the SW reserved portion of the fxsave memory layout, indicating
246 * the presence of the extended state information in the memory layout
247 * pointed to by the fpstate pointer in the sigcontext.
248 * This will be saved whenever the FP and extended state context is
249 * saved on the user stack during signal delivery to the user.
250 */
251void prepare_fx_sw_frame(void)
252{
253 int size_extended = (xstate_size - sizeof(struct i387_fxsave_struct)) +
254 FP_XSTATE_MAGIC2_SIZE;
255
256 sig_xstate_size = sizeof(struct _fpstate) + size_extended;
257
258#ifdef CONFIG_IA32_EMULATION
259 sig_xstate_ia32_size = sizeof(struct _fpstate_ia32) + size_extended;
260#endif
261
262 memset(&fx_sw_reserved, 0, sizeof(fx_sw_reserved));
263
264 fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
265 fx_sw_reserved.extended_size = sig_xstate_size;
266 fx_sw_reserved.xstate_bv = pcntxt_mask;
267 fx_sw_reserved.xstate_size = xstate_size;
268#ifdef CONFIG_IA32_EMULATION
269 memcpy(&fx_sw_reserved_ia32, &fx_sw_reserved,
270 sizeof(struct _fpx_sw_bytes));
271 fx_sw_reserved_ia32.extended_size = sig_xstate_ia32_size;
272#endif
273}
274
275/*
276 * Represents init state for the supported extended state.
277 */
278struct xsave_struct *init_xstate_buf;
279
280#ifdef CONFIG_X86_64
281unsigned int sig_xstate_size = sizeof(struct _fpstate);
282#endif
283
284/*
285 * Enable the extended processor state save/restore feature
286 */
287void __cpuinit xsave_init(void)
288{
289 if (!cpu_has_xsave)
290 return;
291
292 set_in_cr4(X86_CR4_OSXSAVE);
293
294 /*
295 * Enable all the features that the HW is capable of
296 * and the Linux kernel is aware of.
297 */
298 xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
299}
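/*
 * Editor's aside: X86_CR4_OSXSAVE must be set before XSETBV may be
 * executed; the xsetbv() call then loads XCR0 (the
 * XCR_XFEATURE_ENABLED_MASK register), whose bits gate which state
 * components the xsave/xrstor instructions actually save and restore.
 */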
300
301/*
302 * setup the xstate image representing the init state
303 */
304static void __init setup_xstate_init(void)
305{
306 init_xstate_buf = alloc_bootmem(xstate_size);
307 init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
308}
309
310/*
311 * Enable and initialize the xsave feature.
312 */
313void __init xsave_cntxt_init(void)
314{
315 unsigned int eax, ebx, ecx, edx;
316
317 cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
318 pcntxt_mask = eax + ((u64)edx << 32);
319
320 if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
321 printk(KERN_ERR "FP/SSE not shown under xsave features 0x%llx\n",
322 pcntxt_mask);
323 BUG();
324 }
325
326 /*
327 * For now the OS knows only about FP/SSE.
328 */
329 pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
330 xsave_init();
331
332 /*
333 * Recompute the context size for enabled features
334 */
335 cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
336 xstate_size = ebx;
337
338 prepare_fx_sw_frame();
339
340 setup_xstate_init();
341
342 printk(KERN_INFO "xsave/xrstor: enabled xstate_bv 0x%llx, "
343 "cntxt size 0x%x\n",
344 pcntxt_mask, xstate_size);
345}
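/*
 * Editor's note on the CPUID usage above: leaf 0xd, sub-leaf 0 reports
 * the supported xstate feature mask in edx:eax and, in ebx, the buffer
 * size needed for the features currently enabled in XCR0 -- which is
 * why xstate_size is read back only after xsave_init() has programmed
 * XCR0 with the (possibly reduced) pcntxt_mask.
 */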