Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/Makefile | 42
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 169
-rw-r--r--  arch/x86/kernel/acpi/realmode/wakeup.S | 4
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 1
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S | 2
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S | 4
-rw-r--r--  arch/x86/kernel/alternative.c | 23
-rw-r--r--  arch/x86/kernel/apic/Makefile | 19
-rw-r--r--  arch/x86/kernel/apic/apic.c (renamed from arch/x86/kernel/apic.c) | 314
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c (renamed from arch/x86/kernel/genapic_flat_64.c) | 192
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c | 267
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c | 780
-rw-r--r--  arch/x86/kernel/apic/io_apic.c (renamed from arch/x86/kernel/io_apic.c) | 443
-rw-r--r--  arch/x86/kernel/apic/ipi.c | 164
-rw-r--r--  arch/x86/kernel/apic/nmi.c (renamed from arch/x86/kernel/nmi.c) | 12
-rw-r--r--  arch/x86/kernel/apic/numaq_32.c | 557
-rw-r--r--  arch/x86/kernel/apic/probe_32.c | 284
-rw-r--r--  arch/x86/kernel/apic/probe_64.c (renamed from arch/x86/kernel/genapic_64.c) | 55
-rw-r--r--  arch/x86/kernel/apic/summit_32.c | 579
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c (renamed from arch/x86/kernel/genx2apic_cluster.c) | 153
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c (renamed from arch/x86/kernel/genx2apic_phys.c) | 150
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c (renamed from arch/x86/kernel/genx2apic_uv_x.c) | 161
-rw-r--r--  arch/x86/kernel/apm_32.c | 2
-rw-r--r--  arch/x86/kernel/asm-offsets_32.c | 1
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c | 11
-rw-r--r--  arch/x86/kernel/check.c | 2
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 2
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c | 56
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 56
-rw-r--r--  arch/x86/kernel/cpu/centaur.c | 2
-rw-r--r--  arch/x86/kernel/cpu/centaur_64.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 579
-rw-r--r--  arch/x86/kernel/cpu/cpu.h | 11
-rwxr-xr-x  arch/x86/kernel/cpu/cpu_debug.c | 839
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/e_powersaver.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c | 16
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 49
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 71
-rw-r--r--  arch/x86/kernel/cpu/mcheck/Makefile | 1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_32.c | 14
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c | 530
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 43
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 214
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p4.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/threshold.c | 29
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 2
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 20
-rw-r--r--  arch/x86/kernel/cpu/transmeta.c | 2
-rw-r--r--  arch/x86/kernel/cpu/umc.c | 2
-rw-r--r--  arch/x86/kernel/crash.c | 4
-rw-r--r--  arch/x86/kernel/dumpstack.c | 2
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 35
-rw-r--r--  arch/x86/kernel/e820.c | 56
-rw-r--r--  arch/x86/kernel/early_printk.c | 22
-rw-r--r--  arch/x86/kernel/efi.c | 2
-rw-r--r--  arch/x86/kernel/efi_64.c | 1
-rw-r--r--  arch/x86/kernel/efi_stub_32.S | 3
-rw-r--r--  arch/x86/kernel/efi_stub_64.S | 7
-rw-r--r--  arch/x86/kernel/entry_32.S | 473
-rw-r--r--  arch/x86/kernel/entry_64.S | 78
-rw-r--r--  arch/x86/kernel/es7000_32.c | 378
-rw-r--r--  arch/x86/kernel/head64.c | 23
-rw-r--r--  arch/x86/kernel/head_32.S | 44
-rw-r--r--  arch/x86/kernel/head_64.S | 23
-rw-r--r--  arch/x86/kernel/i8259.c | 1
-rw-r--r--  arch/x86/kernel/ioport.c | 14
-rw-r--r--  arch/x86/kernel/ipi.c | 190
-rw-r--r--  arch/x86/kernel/irq.c | 111
-rw-r--r--  arch/x86/kernel/irq_32.c | 61
-rw-r--r--  arch/x86/kernel/irq_64.c | 43
-rw-r--r--  arch/x86/kernel/irqinit_32.c | 39
-rw-r--r--  arch/x86/kernel/irqinit_64.c | 3
-rw-r--r--  arch/x86/kernel/kgdb.c | 4
-rw-r--r--  arch/x86/kernel/kvmclock.c | 1
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c | 19
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c | 179
-rw-r--r--  arch/x86/kernel/mca_32.c | 5
-rw-r--r--  arch/x86/kernel/microcode_intel.c | 10
-rw-r--r--  arch/x86/kernel/mmconf-fam10h_64.c | 2
-rw-r--r--  arch/x86/kernel/module_32.c | 6
-rw-r--r--  arch/x86/kernel/module_64.c | 32
-rw-r--r--  arch/x86/kernel/mpparse.c | 218
-rw-r--r--  arch/x86/kernel/msr.c | 2
-rw-r--r--  arch/x86/kernel/numaq_32.c | 293
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c | 10
-rw-r--r--  arch/x86/kernel/paravirt.c | 56
-rw-r--r--  arch/x86/kernel/paravirt_patch_32.c | 12
-rw-r--r--  arch/x86/kernel/paravirt_patch_64.c | 15
-rw-r--r--  arch/x86/kernel/probe_roms_32.c | 2
-rw-r--r--  arch/x86/kernel/process.c | 193
-rw-r--r--  arch/x86/kernel/process_32.c | 241
-rw-r--r--  arch/x86/kernel/process_64.c | 230
-rw-r--r--  arch/x86/kernel/ptrace.c | 24
-rw-r--r--  arch/x86/kernel/quirks.c | 3
-rw-r--r--  arch/x86/kernel/reboot.c | 5
-rw-r--r--  arch/x86/kernel/relocate_kernel_32.S | 26
-rw-r--r--  arch/x86/kernel/relocate_kernel_64.S | 312
-rw-r--r--  arch/x86/kernel/setup.c | 143
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 679
-rw-r--r--  arch/x86/kernel/signal.c | 463
-rw-r--r--  arch/x86/kernel/smp.c | 15
-rw-r--r--  arch/x86/kernel/smpboot.c | 225
-rw-r--r--  arch/x86/kernel/smpcommon.c | 30
-rw-r--r--  arch/x86/kernel/stacktrace.c | 2
-rw-r--r--  arch/x86/kernel/summit_32.c | 188
-rw-r--r--  arch/x86/kernel/syscall_table_32.S | 20
-rw-r--r--  arch/x86/kernel/time_32.c | 8
-rw-r--r--  arch/x86/kernel/tlb_32.c | 256
-rw-r--r--  arch/x86/kernel/tlb_64.c | 284
-rw-r--r--  arch/x86/kernel/tlb_uv.c | 72
-rw-r--r--  arch/x86/kernel/trampoline_32.S | 2
-rw-r--r--  arch/x86/kernel/trampoline_64.S | 23
-rw-r--r--  arch/x86/kernel/traps.c | 64
-rw-r--r--  arch/x86/kernel/tsc.c | 2
-rw-r--r--  arch/x86/kernel/uv_time.c | 393
-rw-r--r--  arch/x86/kernel/visws_quirks.c | 12
-rw-r--r--  arch/x86/kernel/vm86_32.c | 20
-rw-r--r--  arch/x86/kernel/vmi_32.c | 13
-rw-r--r--  arch/x86/kernel/vmiclock_32.c | 6
-rw-r--r--  arch/x86/kernel/vmlinux_32.lds.S | 11
-rw-r--r--  arch/x86/kernel/vmlinux_64.lds.S | 44
-rw-r--r--  arch/x86/kernel/vsmp_64.c | 24
-rw-r--r--  arch/x86/kernel/x8664_ksyms_64.c | 2
125 files changed, 8313 insertions(+), 5150 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d364df03c1d..339ce35648e 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -23,11 +23,12 @@ nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp)
 CFLAGS_hpet.o := $(nostackp)
 CFLAGS_tsc.o := $(nostackp)
+CFLAGS_paravirt.o := $(nostackp)
 
 obj-y := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o
-obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
+obj-y += setup.o i8259.o irqinit_$(BITS).o
 obj-$(CONFIG_X86_VISWS) += visws_quirks.o
 obj-$(CONFIG_X86_32) += probe_roms_32.o
 obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
@@ -49,31 +50,27 @@ obj-y += step.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += cpu/
 obj-y += acpi/
-obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
+obj-y += reboot.o
 obj-$(CONFIG_MCA) += mca_32.o
 obj-$(CONFIG_X86_MSR) += msr.o
 obj-$(CONFIG_X86_CPUID) += cpuid.o
 obj-$(CONFIG_PCI) += early-quirks.o
 apm-y := apm_32.o
 obj-$(CONFIG_APM) += apm.o
-obj-$(CONFIG_X86_SMP) += smp.o
-obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o
-obj-$(CONFIG_X86_32_SMP) += smpcommon.o
-obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o
+obj-$(CONFIG_SMP) += setup_percpu.o
+obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
 obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE) += mpparse.o
-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
-obj-$(CONFIG_X86_IO_APIC) += io_apic.o
+obj-y += apic/
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
-obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
-obj-$(CONFIG_X86_ES7000) += es7000_32.o
-obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o
-obj-y += vsmp_64.o
+obj-$(CONFIG_X86_VSMP) += vsmp_64.o
 obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_MODULES) += module_$(BITS).o
 obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
@@ -114,16 +111,13 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
-        obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
-        obj-y += bios_uv.o uv_irq.o uv_sysfs.o
-        obj-y += genx2apic_cluster.o
-        obj-y += genx2apic_phys.o
+        obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
        obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
        obj-$(CONFIG_AUDIT) += audit_64.o

        obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
        obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
        obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o

        obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
 endif
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 7678f10c456..a18eb7ce223 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -37,15 +37,10 @@
 #include <asm/pgtable.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
-#include <asm/genapic.h>
 #include <asm/io.h>
 #include <asm/mpspec.h>
 #include <asm/smp.h>
 
-#ifdef CONFIG_X86_LOCAL_APIC
-# include <mach_apic.h>
-#endif
-
 static int __initdata acpi_force = 0;
 u32 acpi_rsdt_forced;
 #ifdef CONFIG_ACPI
@@ -56,16 +51,7 @@ int acpi_disabled = 1;
 EXPORT_SYMBOL(acpi_disabled);
 
 #ifdef CONFIG_X86_64
-
-#include <asm/proto.h>
-
-#else /* X86 */
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <mach_apic.h>
-#include <mach_mpparse.h>
-#endif /* CONFIG_X86_LOCAL_APIC */
-
+# include <asm/proto.h>
 #endif /* X86 */
 
 #define BAD_MADT_ENTRY(entry, end) ( \
@@ -121,35 +107,18 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
  */
 char *__init __acpi_map_table(unsigned long phys, unsigned long size)
 {
-	unsigned long base, offset, mapped_size;
-	int idx;
 
 	if (!phys || !size)
 		return NULL;
 
-	if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT))
-		return __va(phys);
-
-	offset = phys & (PAGE_SIZE - 1);
-	mapped_size = PAGE_SIZE - offset;
-	clear_fixmap(FIX_ACPI_END);
-	set_fixmap(FIX_ACPI_END, phys);
-	base = fix_to_virt(FIX_ACPI_END);
-
-	/*
-	 * Most cases can be covered by the below.
-	 */
-	idx = FIX_ACPI_END;
-	while (mapped_size < size) {
-		if (--idx < FIX_ACPI_BEGIN)
-			return NULL;	/* cannot handle this */
-		phys += PAGE_SIZE;
-		clear_fixmap(idx);
-		set_fixmap(idx, phys);
-		mapped_size += PAGE_SIZE;
-	}
+	return early_ioremap(phys, size);
+}
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+	if (!map || !size)
+		return;
 
-	return ((unsigned char *)base + offset);
+	early_iounmap(map, size);
 }
 
 #ifdef CONFIG_PCI_MMCONFIG
@@ -239,7 +208,8 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 			madt->address);
 	}
 
-	acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
+	default_acpi_madt_oem_check(madt->header.oem_id,
+				    madt->header.oem_table_id);
 
 	return 0;
 }
@@ -884,7 +854,7 @@ static struct {
 	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
 } mp_ioapic_routing[MAX_IO_APICS];
 
-static int mp_find_ioapic(int gsi)
+int mp_find_ioapic(int gsi)
 {
 	int i = 0;
 
@@ -899,6 +869,16 @@ static int mp_find_ioapic(int gsi)
 	return -1;
 }
 
+int mp_find_ioapic_pin(int ioapic, int gsi)
+{
+	if (WARN_ON(ioapic == -1))
+		return -1;
+	if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end))
+		return -1;
+
+	return gsi - mp_ioapic_routing[ioapic].gsi_base;
+}
+
 static u8 __init uniq_ioapic_id(u8 id)
 {
 #ifdef CONFIG_X86_32
@@ -912,8 +892,8 @@ static u8 __init uniq_ioapic_id(u8 id)
 	DECLARE_BITMAP(used, 256);
 	bitmap_zero(used, 256);
 	for (i = 0; i < nr_ioapics; i++) {
-		struct mp_config_ioapic *ia = &mp_ioapics[i];
-		__set_bit(ia->mp_apicid, used);
+		struct mpc_ioapic *ia = &mp_ioapics[i];
+		__set_bit(ia->apicid, used);
 	}
 	if (!test_bit(id, used))
 		return id;
@@ -945,29 +925,29 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
 
 	idx = nr_ioapics;
 
-	mp_ioapics[idx].mp_type = MP_IOAPIC;
-	mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
-	mp_ioapics[idx].mp_apicaddr = address;
+	mp_ioapics[idx].type = MP_IOAPIC;
+	mp_ioapics[idx].flags = MPC_APIC_USABLE;
+	mp_ioapics[idx].apicaddr = address;
 
 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-	mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
+	mp_ioapics[idx].apicid = uniq_ioapic_id(id);
 #ifdef CONFIG_X86_32
-	mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
+	mp_ioapics[idx].apicver = io_apic_get_version(idx);
 #else
-	mp_ioapics[idx].mp_apicver = 0;
+	mp_ioapics[idx].apicver = 0;
 #endif
 	/*
 	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
 	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
 	 */
-	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
+	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
 	mp_ioapic_routing[idx].gsi_base = gsi_base;
 	mp_ioapic_routing[idx].gsi_end = gsi_base +
 	    io_apic_get_redir_entries(idx);
 
-	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-	       "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
-	       mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
+	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+	       "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
+	       mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
 	       mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
 
 	nr_ioapics++;
@@ -996,19 +976,19 @@ int __init acpi_probe_gsi(void)
 	return max_gsi + 1;
 }
 
-static void assign_to_mp_irq(struct mp_config_intsrc *m,
-		      struct mp_config_intsrc *mp_irq)
+static void assign_to_mp_irq(struct mpc_intsrc *m,
+		      struct mpc_intsrc *mp_irq)
 {
-	memcpy(mp_irq, m, sizeof(struct mp_config_intsrc));
+	memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static int mp_irq_cmp(struct mp_config_intsrc *mp_irq,
-		       struct mp_config_intsrc *m)
+static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
+		       struct mpc_intsrc *m)
 {
-	return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc));
+	return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static void save_mp_irq(struct mp_config_intsrc *m)
+static void save_mp_irq(struct mpc_intsrc *m)
 {
 	int i;
 
@@ -1026,7 +1006,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 {
 	int ioapic;
 	int pin;
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;
 
 	/*
 	 * Convert 'gsi' to 'ioapic.pin'.
@@ -1034,7 +1014,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 	ioapic = mp_find_ioapic(gsi);
 	if (ioapic < 0)
 		return;
-	pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+	pin = mp_find_ioapic_pin(ioapic, gsi);
 
 	/*
 	 * TBD: This check is for faulty timer entries, where the override
@@ -1044,13 +1024,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 	if ((bus_irq == 0) && (trigger == 3))
 		trigger = 1;
 
-	mp_irq.mp_type = MP_INTSRC;
-	mp_irq.mp_irqtype = mp_INT;
-	mp_irq.mp_irqflag = (trigger << 2) | polarity;
-	mp_irq.mp_srcbus = MP_ISA_BUS;
-	mp_irq.mp_srcbusirq = bus_irq;	/* IRQ */
-	mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */
-	mp_irq.mp_dstirq = pin;	/* INTIN# */
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (trigger << 2) | polarity;
+	mp_irq.srcbus = MP_ISA_BUS;
+	mp_irq.srcbusirq = bus_irq;	/* IRQ */
+	mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
+	mp_irq.dstirq = pin;	/* INTIN# */
 
 	save_mp_irq(&mp_irq);
 }
@@ -1060,7 +1040,7 @@ void __init mp_config_acpi_legacy_irqs(void)
 	int i;
 	int ioapic;
 	unsigned int dstapic;
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;
 
 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
 	/*
@@ -1085,7 +1065,7 @@ void __init mp_config_acpi_legacy_irqs(void)
 	ioapic = mp_find_ioapic(0);
 	if (ioapic < 0)
 		return;
-	dstapic = mp_ioapics[ioapic].mp_apicid;
+	dstapic = mp_ioapics[ioapic].apicid;
 
 	/*
 	 * Use the default configuration for the IRQs 0-15. Unless
@@ -1095,16 +1075,14 @@ void __init mp_config_acpi_legacy_irqs(void)
 		int idx;
 
 		for (idx = 0; idx < mp_irq_entries; idx++) {
-			struct mp_config_intsrc *irq = mp_irqs + idx;
+			struct mpc_intsrc *irq = mp_irqs + idx;
 
 			/* Do we already have a mapping for this ISA IRQ? */
-			if (irq->mp_srcbus == MP_ISA_BUS
-			    && irq->mp_srcbusirq == i)
+			if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
 				break;
 
 			/* Do we already have a mapping for this IOAPIC pin */
-			if (irq->mp_dstapic == dstapic &&
-			    irq->mp_dstirq == i)
+			if (irq->dstapic == dstapic && irq->dstirq == i)
 				break;
 		}
 
@@ -1113,13 +1091,13 @@ void __init mp_config_acpi_legacy_irqs(void)
 			continue;	/* IRQ already used */
 		}
 
-		mp_irq.mp_type = MP_INTSRC;
-		mp_irq.mp_irqflag = 0;	/* Conforming */
-		mp_irq.mp_srcbus = MP_ISA_BUS;
-		mp_irq.mp_dstapic = dstapic;
-		mp_irq.mp_irqtype = mp_INT;
-		mp_irq.mp_srcbusirq = i; /* Identity mapped */
-		mp_irq.mp_dstirq = i;
+		mp_irq.type = MP_INTSRC;
+		mp_irq.irqflag = 0;	/* Conforming */
+		mp_irq.srcbus = MP_ISA_BUS;
+		mp_irq.dstapic = dstapic;
+		mp_irq.irqtype = mp_INT;
+		mp_irq.srcbusirq = i; /* Identity mapped */
+		mp_irq.dstirq = i;
 
 		save_mp_irq(&mp_irq);
 	}
@@ -1156,7 +1134,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
 		return gsi;
 	}
 
-	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+	ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);
 
 #ifdef CONFIG_X86_32
 	if (ioapic_renumber_irq)
@@ -1230,22 +1208,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
 			u32 gsi, int triggering, int polarity)
 {
 #ifdef CONFIG_X86_MPPARSE
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;
 	int ioapic;
 
 	if (!acpi_ioapic)
 		return 0;
 
 	/* print the entry should happen on mptable identically */
-	mp_irq.mp_type = MP_INTSRC;
-	mp_irq.mp_irqtype = mp_INT;
-	mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
 				(polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
-	mp_irq.mp_srcbus = number;
-	mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
+	mp_irq.srcbus = number;
+	mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
 	ioapic = mp_find_ioapic(gsi);
-	mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id;
-	mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
+	mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
+	mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
 
 	save_mp_irq(&mp_irq);
 #endif
@@ -1372,7 +1350,7 @@ static void __init acpi_process_madt(void)
 	if (!error) {
 		acpi_lapic = 1;
 
-#ifdef CONFIG_X86_GENERICARCH
+#ifdef CONFIG_X86_BIGSMP
 		generic_bigsmp_probe();
 #endif
 		/*
@@ -1384,9 +1362,8 @@ static void __init acpi_process_madt(void)
 			acpi_ioapic = 1;
 
 			smp_found_config = 1;
-#ifdef CONFIG_X86_32
-			setup_apic_routing();
-#endif
+			if (apic->setup_apic_routing)
+				apic->setup_apic_routing();
 		}
 	}
 	if (error == -EINVAL) {
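The boot.c hunks above replace the open-coded "gsi - mp_ioapic_routing[ioapic].gsi_base" arithmetic with the new range-checked mp_find_ioapic_pin() helper, and convert __acpi_map_table() to early_ioremap(). As a minimal sketch (not part of this patch, and assuming the kernel context it modifies), a caller would now convert a GSI into an I/O APIC pin like this; sketch_route_gsi() is a hypothetical name:

static int sketch_route_gsi(u32 gsi)
{
	/* which registered I/O APIC serves this GSI's range? */
	int ioapic = mp_find_ioapic(gsi);
	int pin;

	if (ioapic < 0)
		return -1;		/* no I/O APIC covers this GSI */

	/* pin = offset of the GSI inside that APIC's gsi_base..gsi_end window */
	pin = mp_find_ioapic_pin(ioapic, gsi);
	if (pin < 0)
		return -1;		/* tripped a WARN_ON range check */

	return pin;
}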
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index 3355973b12a..580b4e29601 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -3,8 +3,8 @@
  */
 #include <asm/segment.h>
 #include <asm/msr-index.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
+#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
 #include <asm/processor-flags.h>
 
 	.code16
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index a60c1f3bcb8..7c243a2c511 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -101,6 +101,7 @@ int acpi_save_state_mem(void)
 	stack_start.sp = temp_stack + sizeof(temp_stack);
 	early_gdt_descr.address =
 			(unsigned long)get_cpu_gdt_table(smp_processor_id());
+	initial_gs = per_cpu_offset(smp_processor_id());
 #endif
 	initial_code = (unsigned long)wakeup_long64;
 	saved_magic = 0x123456789abcdef0;
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index a12e6a9fb65..8ded418b059 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -1,7 +1,7 @@
 	.section .text.page_aligned
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 
 # Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
 
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 96258d9dc97..8ea5164cbd0 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -1,8 +1,8 @@
 .text
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <asm/pgtable_types.h>
+#include <asm/page_types.h>
 #include <asm/msr.h>
 #include <asm/asm-offsets.h>
 
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a84ac7b570e..4c80f155743 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -414,9 +414,17 @@ void __init alternative_instructions(void)
 	   that might execute the to be patched code.
 	   Other CPUs are not running. */
 	stop_nmi();
-#ifdef CONFIG_X86_MCE
-	stop_mce();
-#endif
+
+	/*
+	 * Don't stop machine check exceptions while patching.
+	 * MCEs only happen when something got corrupted and in this
+	 * case we must do something about the corruption.
+	 * Ignoring it is worse than a unlikely patching race.
+	 * Also machine checks tend to be broadcast and if one CPU
+	 * goes into machine check the others follow quickly, so we don't
+	 * expect a machine check to cause undue problems during to code
+	 * patching.
+	 */
 
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
@@ -456,9 +464,6 @@ void __init alternative_instructions(void)
 						(unsigned long)__smp_locks_end);
 
 	restart_nmi();
-#ifdef CONFIG_X86_MCE
-	restart_mce();
-#endif
 }
 
 /**
@@ -498,12 +503,12 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
  */
 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 {
-	unsigned long flags;
 	char *vaddr;
 	int nr_pages = 2;
 	struct page *pages[2];
 	int i;
 
+	might_sleep();
 	if (!core_kernel_text((unsigned long)addr)) {
 		pages[0] = vmalloc_to_page(addr);
 		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -517,9 +522,9 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 		nr_pages = 1;
 	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 	BUG_ON(!vaddr);
-	local_irq_save(flags);
+	local_irq_disable();
 	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-	local_irq_restore(flags);
+	local_irq_enable();
 	vunmap(vaddr);
 	sync_core();
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
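Note the contract change in the alternative.c hunks above: text_poke() now calls might_sleep() and uses plain local_irq_disable()/local_irq_enable(), so it may only be called from sleepable process context (vmap()/vunmap() can sleep), never under a spinlock or from interrupt context. A hypothetical caller, sketched here only to illustrate that rule (names and opcode choice are illustrative, not from this patch):

/* illustrative only: patch a site with a 5-byte NOP (0f 1f 44 00 00) */
static void sketch_patch_nop(void *site)
{
	static const unsigned char nop5[] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

	/* must NOT hold a spinlock or run in IRQ context at this point */
	text_poke(site, nop5, sizeof(nop5));
}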
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
new file mode 100644
index 00000000000..da7b7b9f8bd
--- /dev/null
+++ b/arch/x86/kernel/apic/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for local APIC drivers and for the IO-APIC code
+#
+
+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o probe_$(BITS).o ipi.o nmi.o
+obj-$(CONFIG_X86_IO_APIC) += io_apic.o
+obj-$(CONFIG_SMP) += ipi.o
+
+ifeq ($(CONFIG_X86_64),y)
+obj-y += apic_flat_64.o
+obj-$(CONFIG_X86_X2APIC) += x2apic_cluster.o
+obj-$(CONFIG_X86_X2APIC) += x2apic_phys.o
+obj-$(CONFIG_X86_UV) += x2apic_uv_x.o
+endif
+
+obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o
+obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
+obj-$(CONFIG_X86_ES7000) += es7000_32.o
+obj-$(CONFIG_X86_SUMMIT) += summit_32.o
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic/apic.c
index 570f36e44e5..30909a258d0 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1,7 +1,7 @@
 /*
  * Local APIC handling, local APIC timers
  *
- * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ * (c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  *
  * Fixes
  * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
@@ -14,51 +14,70 @@
  * Mikael Pettersson : PM converted to driver model.
  */
 
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/interrupt.h>
-#include <linux/mc146818rtc.h>
 #include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
-#include <linux/ioport.h>
-#include <linux/cpu.h>
-#include <linux/clockchips.h>
+#include <linux/mc146818rtc.h>
 #include <linux/acpi_pmtmr.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/bootmem.h>
+#include <linux/ftrace.h>
+#include <linux/ioport.h>
 #include <linux/module.h>
-#include <linux/dmi.h>
+#include <linux/sysdev.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
 #include <linux/dmar.h>
-#include <linux/ftrace.h>
-#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/dmi.h>
 #include <linux/nmi.h>
-#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
 
+#include <asm/pgalloc.h>
 #include <asm/atomic.h>
-#include <asm/mtrr.h>
 #include <asm/mpspec.h>
-#include <asm/desc.h>
-#include <asm/arch_hooks.h>
-#include <asm/hpet.h>
-#include <asm/pgalloc.h>
 #include <asm/i8253.h>
-#include <asm/idle.h>
+#include <asm/i8259.h>
 #include <asm/proto.h>
 #include <asm/apic.h>
-#include <asm/i8259.h>
+#include <asm/desc.h>
+#include <asm/hpet.h>
+#include <asm/idle.h>
+#include <asm/mtrr.h>
 #include <asm/smp.h>
+#include <asm/mce.h>
+
+unsigned int num_processors;
+
+unsigned disabled_cpus __cpuinitdata;
 
-#include <mach_apic.h>
-#include <mach_apicdef.h>
-#include <mach_ipi.h>
+/* Processor that is doing the boot up */
+unsigned int boot_cpu_physical_apicid = -1U;
 
 /*
- * Sanity check
+ * The highest APIC ID seen during enumeration.
+ *
+ * This determines the messaging protocol we can use: if all APIC IDs
+ * are in the 0 ... 7 range, then we can use logical addressing which
+ * has some performance advantages (better broadcasting).
+ *
+ * If there's an APIC ID above 8, we use physical addressing.
  */
-#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
-# error SPURIOUS_APIC_VECTOR definition error
-#endif
+unsigned int max_physical_apicid;
+
+/*
+ * Bitmask of physically existing CPUs:
+ */
+physid_mask_t phys_cpu_present_map;
+
+/*
+ * Map cpu index to physical APIC ID
+ */
+DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 #ifdef CONFIG_X86_32
 /*
@@ -92,11 +111,7 @@ static __init int setup_apicpmtimer(char *s)
 __setup("apicpmtimer", setup_apicpmtimer);
 #endif
 
-#ifdef CONFIG_X86_64
-#define HAVE_X2APIC
-#endif
-
-#ifdef HAVE_X2APIC
+#ifdef CONFIG_X86_X2APIC
 int x2apic;
 /* x2apic enabled before OS handover */
 static int x2apic_preenabled;
@@ -194,18 +209,13 @@ static int modern_apic(void)
 	return lapic_get_version() >= 0x14;
 }
 
-/*
- * Paravirt kernels also might be using these below ops. So we still
- * use generic apic_read()/apic_write(), which might be pointing to different
- * ops in PARAVIRT case.
- */
-void xapic_wait_icr_idle(void)
+void native_apic_wait_icr_idle(void)
 {
 	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
 		cpu_relax();
 }
 
-u32 safe_xapic_wait_icr_idle(void)
+u32 native_safe_apic_wait_icr_idle(void)
 {
 	u32 send_status;
 	int timeout;
@@ -221,13 +231,13 @@ u32 safe_xapic_wait_icr_idle(void)
 	return send_status;
 }
 
-void xapic_icr_write(u32 low, u32 id)
+void native_apic_icr_write(u32 low, u32 id)
 {
 	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
 	apic_write(APIC_ICR, low);
 }
 
-static u64 xapic_icr_read(void)
+u64 native_apic_icr_read(void)
 {
 	u32 icr1, icr2;
 
@@ -237,54 +247,6 @@ static u64 xapic_icr_read(void)
 	return icr1 | ((u64)icr2 << 32);
 }
 
-static struct apic_ops xapic_ops = {
-	.read = native_apic_mem_read,
-	.write = native_apic_mem_write,
-	.icr_read = xapic_icr_read,
-	.icr_write = xapic_icr_write,
-	.wait_icr_idle = xapic_wait_icr_idle,
-	.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
-};
-
-struct apic_ops __read_mostly *apic_ops = &xapic_ops;
-EXPORT_SYMBOL_GPL(apic_ops);
-
-#ifdef HAVE_X2APIC
-static void x2apic_wait_icr_idle(void)
-{
-	/* no need to wait for icr idle in x2apic */
-	return;
-}
-
-static u32 safe_x2apic_wait_icr_idle(void)
-{
-	/* no need to wait for icr idle in x2apic */
-	return 0;
-}
-
-void x2apic_icr_write(u32 low, u32 id)
-{
-	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
-}
-
-static u64 x2apic_icr_read(void)
-{
-	unsigned long val;
-
-	rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
-	return val;
-}
-
-static struct apic_ops x2apic_ops = {
-	.read = native_apic_msr_read,
-	.write = native_apic_msr_write,
-	.icr_read = x2apic_icr_read,
-	.icr_write = x2apic_icr_write,
-	.wait_icr_idle = x2apic_wait_icr_idle,
-	.safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
-};
-#endif
-
 /**
  * enable_NMI_through_LVT0 - enable NMI through local vector table 0
  */
@@ -457,7 +419,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
-	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+	apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
@@ -535,7 +497,8 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
 	}
 }
 
-static int __init calibrate_by_pmtimer(long deltapm, long *delta)
+static int __init
+calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
 {
 	const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
 	const long pm_thresh = pm_100ms / 100;
@@ -546,7 +509,7 @@ static int __init calibrate_by_pmtimer(long deltapm, long *delta)
 	return -1;
 #endif
 
-	apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
+	apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
 
 	/* Check, if the PM timer is available */
 	if (!deltapm)
@@ -556,19 +519,30 @@
 
 	if (deltapm > (pm_100ms - pm_thresh) &&
 	    deltapm < (pm_100ms + pm_thresh)) {
-		apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
-	} else {
-		res = (((u64)deltapm) * mult) >> 22;
-		do_div(res, 1000000);
-		pr_warning("APIC calibration not consistent "
-			"with PM Timer: %ldms instead of 100ms\n",
-			(long)res);
-		/* Correct the lapic counter value */
-		res = (((u64)(*delta)) * pm_100ms);
+		apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
+		return 0;
+	}
+
+	res = (((u64)deltapm) * mult) >> 22;
+	do_div(res, 1000000);
+	pr_warning("APIC calibration not consistent "
+		   "with PM-Timer: %ldms instead of 100ms\n",(long)res);
+
+	/* Correct the lapic counter value */
+	res = (((u64)(*delta)) * pm_100ms);
+	do_div(res, deltapm);
+	pr_info("APIC delta adjusted to PM-Timer: "
+		"%lu (%ld)\n", (unsigned long)res, *delta);
+	*delta = (long)res;
+
+	/* Correct the tsc counter value */
+	if (cpu_has_tsc) {
+		res = (((u64)(*deltatsc)) * pm_100ms);
 		do_div(res, deltapm);
-		pr_info("APIC delta adjusted to PM-Timer: "
-			"%lu (%ld)\n", (unsigned long)res, *delta);
-		*delta = (long)res;
+		apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
+			"PM-Timer: %lu (%ld) \n",
+			(unsigned long)res, *deltatsc);
+		*deltatsc = (long)res;
 	}
 
 	return 0;
@@ -579,7 +553,7 @@ static int __init calibrate_APIC_clock(void)
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 	void (*real_handler)(struct clock_event_device *dev);
 	unsigned long deltaj;
-	long delta;
+	long delta, deltatsc;
 	int pm_referenced = 0;
 
 	local_irq_disable();
@@ -609,9 +583,11 @@ static int __init calibrate_APIC_clock(void)
 	delta = lapic_cal_t1 - lapic_cal_t2;
 	apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
 
+	deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
+
 	/* we trust the PM based calibration if possible */
 	pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
-					&delta);
+					&delta, &deltatsc);
 
 	/* Calculate the scaled math multiplication factor */
 	lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
@@ -629,11 +605,10 @@ static int __init calibrate_APIC_clock(void)
 		    calibration_result);
 
 	if (cpu_has_tsc) {
-		delta = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
 		apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
 			    "%ld.%04ld MHz.\n",
-			    (delta / LAPIC_CAL_LOOPS) / (1000000 / HZ),
-			    (delta / LAPIC_CAL_LOOPS) % (1000000 / HZ));
+			    (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
+			    (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
 	}
 
 	apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
@@ -868,6 +843,14 @@ void clear_local_APIC(void)
 		apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
 	}
 #endif
+#ifdef CONFIG_X86_MCE_INTEL
+	if (maxlvt >= 6) {
+		v = apic_read(APIC_LVTCMCI);
+		if (!(v & APIC_LVT_MASKED))
+			apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
+	}
+#endif
+
 	/*
 	 * Clean APIC state for other OSs:
 	 */
@@ -991,11 +974,11 @@ int __init verify_local_APIC(void)
 	 */
 	reg0 = apic_read(APIC_ID);
 	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
-	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
+	apic_write(APIC_ID, reg0 ^ apic->apic_id_mask);
 	reg1 = apic_read(APIC_ID);
 	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
 	apic_write(APIC_ID, reg0);
-	if (reg1 != (reg0 ^ APIC_ID_MASK))
+	if (reg1 != (reg0 ^ apic->apic_id_mask))
 		return 0;
 
 	/*
@@ -1089,7 +1072,7 @@ static void __cpuinit lapic_setup_esr(void)
 		return;
 	}
 
-	if (esr_disable) {
+	if (apic->disable_esr) {
 		/*
 		 * Something untraceable is creating bad interrupts on
 		 * secondary quads ... for the moment, just leave the
@@ -1130,9 +1113,14 @@ void __cpuinit setup_local_APIC(void)
 	unsigned int value;
 	int i, j;
 
+	if (disable_apic) {
+		arch_disable_smp_support();
+		return;
+	}
+
 #ifdef CONFIG_X86_32
 	/* Pound the ESR really hard over the head with a big hammer - mbligh */
-	if (lapic_is_integrated() && esr_disable) {
+	if (lapic_is_integrated() && apic->disable_esr) {
 		apic_write(APIC_ESR, 0);
 		apic_write(APIC_ESR, 0);
 		apic_write(APIC_ESR, 0);
@@ -1146,7 +1134,7 @@ void __cpuinit setup_local_APIC(void)
 	 * Double-check whether this APIC is really registered.
	 * This is meaningless in clustered apic mode, so we skip it.
 	 */
-	if (!apic_id_registered())
+	if (!apic->apic_id_registered())
 		BUG();
 
 	/*
@@ -1154,7 +1142,7 @@ void __cpuinit setup_local_APIC(void)
 	 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
 	 * document number 292116). So here it goes...
 	 */
-	init_apic_ldr();
+	apic->init_apic_ldr();
 
 	/*
 	 * Set Task Priority to 'accept all'. We never change this
@@ -1262,6 +1250,12 @@ void __cpuinit setup_local_APIC(void)
 	apic_write(APIC_LVT1, value);
 
 	preempt_enable();
+
+#ifdef CONFIG_X86_MCE_INTEL
+	/* Recheck CMCI information after local APIC is up on CPU #0 */
+	if (smp_processor_id() == 0)
+		cmci_recheck();
+#endif
 }
 
 void __cpuinit end_local_APIC_setup(void)
@@ -1282,17 +1276,12 @@ void __cpuinit end_local_APIC_setup(void)
 	apic_pm_activate();
 }
 
-#ifdef HAVE_X2APIC
+#ifdef CONFIG_X86_X2APIC
 void check_x2apic(void)
 {
-	int msr, msr2;
-
-	rdmsr(MSR_IA32_APICBASE, msr, msr2);
-
-	if (msr & X2APIC_ENABLE) {
+	if (x2apic_enabled()) {
 		pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
 		x2apic_preenabled = x2apic = 1;
-		apic_ops = &x2apic_ops;
 	}
 }
 
@@ -1300,6 +1289,9 @@ void enable_x2apic(void)
 {
 	int msr, msr2;
 
+	if (!x2apic)
+		return;
+
 	rdmsr(MSR_IA32_APICBASE, msr, msr2);
 	if (!(msr & X2APIC_ENABLE)) {
 		pr_info("Enabling x2apic\n");
@@ -1363,7 +1355,6 @@ void __init enable_IR_x2apic(void)
 
 	if (!x2apic) {
 		x2apic = 1;
-		apic_ops = &x2apic_ops;
 		enable_x2apic();
 	}
 
@@ -1401,7 +1392,7 @@ end:
 
 	return;
 }
-#endif /* HAVE_X2APIC */
+#endif /* CONFIG_X86_X2APIC */
 
 #ifdef CONFIG_X86_64
 /*
@@ -1532,7 +1523,7 @@ void __init early_init_lapic_mapping(void)
  */
 void __init init_apic_mappings(void)
 {
-#ifdef HAVE_X2APIC
+#ifdef CONFIG_X86_X2APIC
 	if (x2apic) {
 		boot_cpu_physical_apicid = read_apic_id();
 		return;
@@ -1570,11 +1561,11 @@ int apic_version[MAX_APICS];
 
 int __init APIC_init_uniprocessor(void)
 {
-#ifdef CONFIG_X86_64
 	if (disable_apic) {
 		pr_info("Apic disabled\n");
 		return -1;
 	}
+#ifdef CONFIG_X86_64
 	if (!cpu_has_apic) {
 		disable_apic = 1;
 		pr_info("Apic disabled by BIOS\n");
@@ -1596,11 +1587,9 @@ int __init APIC_init_uniprocessor(void)
 	}
 #endif
 
-#ifdef HAVE_X2APIC
 	enable_IR_x2apic();
-#endif
 #ifdef CONFIG_X86_64
-	setup_apic_routing();
+	default_setup_apic_routing();
 #endif
 
 	verify_local_APIC();
@@ -1621,35 +1610,31 @@ int __init APIC_init_uniprocessor(void)
 	physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
 	setup_local_APIC();
 
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_IO_APIC
 	/*
 	 * Now enable IO-APICs, actually call clear_IO_APIC
-	 * We need clear_IO_APIC before enabling vector on BP
+	 * We need clear_IO_APIC before enabling error vector
 	 */
 	if (!skip_ioapic_setup && nr_ioapics)
 		enable_IO_APIC();
 #endif
 
-#ifdef CONFIG_X86_IO_APIC
-	if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
-#endif
-		localise_nmi_watchdog();
 	end_local_APIC_setup();
 
 #ifdef CONFIG_X86_IO_APIC
 	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
 		setup_IO_APIC();
-# ifdef CONFIG_X86_64
-	else
+	else {
 		nr_ioapics = 0;
-# endif
+		localise_nmi_watchdog();
+	}
+#else
+	localise_nmi_watchdog();
 #endif
 
+	setup_boot_clock();
 #ifdef CONFIG_X86_64
-	setup_boot_APIC_clock();
 	check_nmi_watchdog();
-#else
-	setup_boot_clock();
 #endif
 
 	return 0;
@@ -1738,7 +1723,8 @@ void __init connect_bsp_APIC(void)
 		outb(0x01, 0x23);
 	}
 #endif
-	enable_apic_mode();
+	if (apic->enable_apic_mode)
+		apic->enable_apic_mode();
 }
 
 /**
@@ -1876,29 +1862,39 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	}
 #endif
 
-#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
-	/* are we being called early in kernel startup? */
-	if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
-		u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
-		u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
-
-		cpu_to_apicid[cpu] = apicid;
-		bios_cpu_apicid[cpu] = apicid;
-	} else {
-		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
-		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
-	}
+#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
+	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
+	early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
 #endif
 
 	set_cpu_possible(cpu, true);
 	set_cpu_present(cpu, true);
 }
 
-#ifdef CONFIG_X86_64
 int hard_smp_processor_id(void)
 {
 	return read_apic_id();
 }
+
+void default_init_apic_ldr(void)
+{
+	unsigned long val;
+
+	apic_write(APIC_DFR, APIC_DFR_VALUE);
+	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+	val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
+	apic_write(APIC_LDR, val);
+}
+
+#ifdef CONFIG_X86_32
+int default_apicid_to_node(int logical_apicid)
+{
+#ifdef CONFIG_SMP
+	return apicid_2_node[hard_smp_processor_id()];
+#else
+	return 0;
+#endif
+}
 #endif
 
 /*
@@ -1976,7 +1972,7 @@ static int lapic_resume(struct sys_device *dev)
 
 	local_irq_save(flags);
 
-#ifdef HAVE_X2APIC
+#ifdef CONFIG_X86_X2APIC
 	if (x2apic)
 		enable_x2apic();
 	else
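Taken together, the apic.c hunks above fold the old apic_ops structure (read/write/ICR accessors) and the mach_apic macro layer into a single 'struct apic' driver reached through the global 'apic' pointer: mandatory hooks are called directly (apic->send_IPI_mask(), apic->init_apic_ldr()), optional ones are NULL-checked (apic->enable_apic_mode, apic->setup_apic_routing). A minimal standalone sketch of that dispatch pattern, with hypothetical names (struct apic_sketch, apic_drv); only the shape of the pattern is taken from the patch:

#include <stddef.h>

struct cpumask;				/* opaque here; supplied by the kernel */

struct apic_sketch {
	const char *name;
	/* mandatory hooks: every APIC driver fills these in */
	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
	void (*init_apic_ldr)(void);
	/* optional hook: NULL means "nothing to do" */
	void (*enable_apic_mode)(void);
};

static struct apic_sketch *apic_drv;	/* stands in for the global 'apic' */

static void sketch_timer_broadcast(const struct cpumask *mask)
{
	apic_drv->send_IPI_mask(mask, 0xef);	/* cf. lapic_timer_broadcast() */
}

static void sketch_connect_bsp(void)
{
	if (apic_drv->enable_apic_mode)		/* cf. connect_bsp_APIC() */
		apic_drv->enable_apic_mode();
}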
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 34185488e4f..f933822dba1 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -17,9 +17,8 @@
 #include <linux/init.h>
 #include <linux/hardirq.h>
 #include <asm/smp.h>
+#include <asm/apic.h>
 #include <asm/ipi.h>
-#include <asm/genapic.h>
-#include <mach_apicdef.h>
 
 #ifdef CONFIG_ACPI
 #include <acpi/acpi_bus.h>
@@ -74,7 +73,7 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
+	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
 	local_irq_restore(flags);
 }
 
@@ -85,14 +84,15 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 	_flat_send_IPI_mask(mask, vector);
 }
 
-static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
-					  int vector)
+static void
+ flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
 {
 	unsigned long mask = cpumask_bits(cpumask)[0];
 	int cpu = smp_processor_id();
 
 	if (cpu < BITS_PER_LONG)
 		clear_bit(cpu, &mask);
+
 	_flat_send_IPI_mask(mask, vector);
 }
 
@@ -114,23 +114,27 @@ static void flat_send_IPI_allbutself(int vector)
 			_flat_send_IPI_mask(mask, vector);
 		}
 	} else if (num_online_cpus() > 1) {
-		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
+		__default_send_IPI_shortcut(APIC_DEST_ALLBUT,
+					    vector, apic->dest_logical);
 	}
 }
 
 static void flat_send_IPI_all(int vector)
 {
-	if (vector == NMI_VECTOR)
+	if (vector == NMI_VECTOR) {
 		flat_send_IPI_mask(cpu_online_mask, vector);
-	else
-		__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
+	} else {
+		__default_send_IPI_shortcut(APIC_DEST_ALLINC,
+					    vector, apic->dest_logical);
+	}
 }
 
-static unsigned int get_apic_id(unsigned long x)
+static unsigned int flat_get_apic_id(unsigned long x)
 {
 	unsigned int id;
 
 	id = (((x)>>24) & 0xFFu);
+
 	return id;
 }
 
@@ -146,7 +150,7 @@ static unsigned int read_xapic_id(void)
 {
 	unsigned int id;
 
-	id = get_apic_id(apic_read(APIC_ID));
+	id = flat_get_apic_id(apic_read(APIC_ID));
 	return id;
 }
 
@@ -169,31 +173,67 @@ static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 	return mask1 & mask2;
 }
 
-static unsigned int phys_pkg_id(int index_msb)
+static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
 {
 	return hard_smp_processor_id() >> index_msb;
 }
 
-struct genapic apic_flat = {
+struct apic apic_flat = {
 	.name = "flat",
-	.acpi_madt_oem_check = flat_acpi_madt_oem_check,
-	.int_delivery_mode = dest_LowestPrio,
-	.int_dest_mode = (APIC_DEST_LOGICAL != 0),
-	.target_cpus = flat_target_cpus,
-	.vector_allocation_domain = flat_vector_allocation_domain,
-	.apic_id_registered = flat_apic_id_registered,
-	.init_apic_ldr = flat_init_apic_ldr,
-	.send_IPI_all = flat_send_IPI_all,
-	.send_IPI_allbutself = flat_send_IPI_allbutself,
-	.send_IPI_mask = flat_send_IPI_mask,
-	.send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
-	.send_IPI_self = apic_send_IPI_self,
-	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
-	.phys_pkg_id = phys_pkg_id,
-	.get_apic_id = get_apic_id,
-	.set_apic_id = set_apic_id,
-	.apic_id_mask = (0xFFu<<24),
+	.probe = NULL,
+	.acpi_madt_oem_check = flat_acpi_madt_oem_check,
+	.apic_id_registered = flat_apic_id_registered,
+
+	.irq_delivery_mode = dest_LowestPrio,
+	.irq_dest_mode = 1, /* logical */
+
+	.target_cpus = flat_target_cpus,
+	.disable_esr = 0,
+	.dest_logical = APIC_DEST_LOGICAL,
+	.check_apicid_used = NULL,
+	.check_apicid_present = NULL,
+
+	.vector_allocation_domain = flat_vector_allocation_domain,
+	.init_apic_ldr = flat_init_apic_ldr,
+
+	.ioapic_phys_id_map = NULL,
+	.setup_apic_routing = NULL,
+	.multi_timer_check = NULL,
+	.apicid_to_node = NULL,
+	.cpu_to_logical_apicid = NULL,
+	.cpu_present_to_apicid = default_cpu_present_to_apicid,
+	.apicid_to_cpu_present = NULL,
+	.setup_portio_remap = NULL,
+	.check_phys_apicid_present = default_check_phys_apicid_present,
+	.enable_apic_mode = NULL,
+	.phys_pkg_id = flat_phys_pkg_id,
+	.mps_oem_check = NULL,
+
+	.get_apic_id = flat_get_apic_id,
+	.set_apic_id = set_apic_id,
+	.apic_id_mask = 0xFFu << 24,
+
+	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+	.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
+
+	.send_IPI_mask = flat_send_IPI_mask,
+	.send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
+	.send_IPI_allbutself = flat_send_IPI_allbutself,
+	.send_IPI_all = flat_send_IPI_all,
+	.send_IPI_self = apic_send_IPI_self,
+
+	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+	.wait_for_init_deassert = NULL,
+	.smp_callin_clear_local_apic = NULL,
+	.inquire_remote_apic = NULL,
+
+	.read = native_apic_mem_read,
+	.write = native_apic_mem_write,
+	.icr_read = native_apic_icr_read,
+	.icr_write = native_apic_icr_write,
+	.wait_icr_idle = native_apic_wait_icr_idle,
+	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
 };
 
 /*
@@ -232,18 +272,18 @@ static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 
 static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
-	send_IPI_mask_sequence(cpumask, vector);
+	default_send_IPI_mask_sequence_phys(cpumask, vector);
 }
 
 static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
 					      int vector)
 {
-	send_IPI_mask_allbutself(cpumask, vector);
+	default_send_IPI_mask_allbutself_phys(cpumask, vector);
 }
 
 static void physflat_send_IPI_allbutself(int vector)
 {
-	send_IPI_mask_allbutself(cpu_online_mask, vector);
+	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
 }
 
 static void physflat_send_IPI_all(int vector)
@@ -276,32 +316,72 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
276 * We're using fixed IRQ delivery, can only return one phys APIC ID. 316 * We're using fixed IRQ delivery, can only return one phys APIC ID.
277 * May as well be the first. 317 * May as well be the first.
278 */ 318 */
279 for_each_cpu_and(cpu, cpumask, andmask) 319 for_each_cpu_and(cpu, cpumask, andmask) {
280 if (cpumask_test_cpu(cpu, cpu_online_mask)) 320 if (cpumask_test_cpu(cpu, cpu_online_mask))
281 break; 321 break;
322 }
282 if (cpu < nr_cpu_ids) 323 if (cpu < nr_cpu_ids)
283 return per_cpu(x86_cpu_to_apicid, cpu); 324 return per_cpu(x86_cpu_to_apicid, cpu);
325
284 return BAD_APICID; 326 return BAD_APICID;
285} 327}
286 328
287struct genapic apic_physflat = { 329struct apic apic_physflat = {
288 .name = "physical flat", 330
289 .acpi_madt_oem_check = physflat_acpi_madt_oem_check, 331 .name = "physical flat",
290 .int_delivery_mode = dest_Fixed, 332 .probe = NULL,
291 .int_dest_mode = (APIC_DEST_PHYSICAL != 0), 333 .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
292 .target_cpus = physflat_target_cpus, 334 .apic_id_registered = flat_apic_id_registered,
293 .vector_allocation_domain = physflat_vector_allocation_domain, 335
294 .apic_id_registered = flat_apic_id_registered, 336 .irq_delivery_mode = dest_Fixed,
295 .init_apic_ldr = flat_init_apic_ldr,/*not needed, but shouldn't hurt*/ 337 .irq_dest_mode = 0, /* physical */
296 .send_IPI_all = physflat_send_IPI_all, 338
297 .send_IPI_allbutself = physflat_send_IPI_allbutself, 339 .target_cpus = physflat_target_cpus,
298 .send_IPI_mask = physflat_send_IPI_mask, 340 .disable_esr = 0,
299 .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, 341 .dest_logical = 0,
300 .send_IPI_self = apic_send_IPI_self, 342 .check_apicid_used = NULL,
301 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, 343 .check_apicid_present = NULL,
302 .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, 344
303 .phys_pkg_id = phys_pkg_id, 345 .vector_allocation_domain = physflat_vector_allocation_domain,
304 .get_apic_id = get_apic_id, 346 /* not needed, but shouldn't hurt: */
305 .set_apic_id = set_apic_id, 347 .init_apic_ldr = flat_init_apic_ldr,
306 .apic_id_mask = (0xFFu<<24), 348
349 .ioapic_phys_id_map = NULL,
350 .setup_apic_routing = NULL,
351 .multi_timer_check = NULL,
352 .apicid_to_node = NULL,
353 .cpu_to_logical_apicid = NULL,
354 .cpu_present_to_apicid = default_cpu_present_to_apicid,
355 .apicid_to_cpu_present = NULL,
356 .setup_portio_remap = NULL,
357 .check_phys_apicid_present = default_check_phys_apicid_present,
358 .enable_apic_mode = NULL,
359 .phys_pkg_id = flat_phys_pkg_id,
360 .mps_oem_check = NULL,
361
362 .get_apic_id = flat_get_apic_id,
363 .set_apic_id = set_apic_id,
364 .apic_id_mask = 0xFFu << 24,
365
366 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
367 .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
368
369 .send_IPI_mask = physflat_send_IPI_mask,
370 .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
371 .send_IPI_allbutself = physflat_send_IPI_allbutself,
372 .send_IPI_all = physflat_send_IPI_all,
373 .send_IPI_self = apic_send_IPI_self,
374
375 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
376 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
377 .wait_for_init_deassert = NULL,
378 .smp_callin_clear_local_apic = NULL,
379 .inquire_remote_apic = NULL,
380
381 .read = native_apic_mem_read,
382 .write = native_apic_mem_write,
383 .icr_read = native_apic_icr_read,
384 .icr_write = native_apic_icr_write,
385 .wait_icr_idle = native_apic_wait_icr_idle,
386 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
307}; 387};
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
new file mode 100644
index 00000000000..d806ecaa948
--- /dev/null
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -0,0 +1,267 @@
1/*
2 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
3 *
4 * Drives the local APIC in "clustered mode".
5 */
6#include <linux/threads.h>
7#include <linux/cpumask.h>
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/dmi.h>
11#include <linux/smp.h>
12
13#include <asm/apicdef.h>
14#include <asm/fixmap.h>
15#include <asm/mpspec.h>
16#include <asm/apic.h>
17#include <asm/ipi.h>
18
19static unsigned bigsmp_get_apic_id(unsigned long x)
20{
21 return (x >> 24) & 0xFF;
22}
23
24static int bigsmp_apic_id_registered(void)
25{
26 return 1;
27}
28
29static const cpumask_t *bigsmp_target_cpus(void)
30{
31#ifdef CONFIG_SMP
32 return &cpu_online_map;
33#else
34 return &cpumask_of_cpu(0);
35#endif
36}
37
38static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
39{
40 return 0;
41}
42
43static unsigned long bigsmp_check_apicid_present(int bit)
44{
45 return 1;
46}
47
48static inline unsigned long calculate_ldr(int cpu)
49{
50 unsigned long val, id;
51
52 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
53 id = per_cpu(x86_bios_cpu_apicid, cpu);
54 val |= SET_APIC_LOGICAL_ID(id);
55
56 return val;
57}
58
59/*
60 * Set up the logical destination ID.
61 *
62 * Intel recommends setting DFR, LDR and TPR before enabling
63 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
64 * document number 292116). So here it goes...
65 */
66static void bigsmp_init_apic_ldr(void)
67{
68 unsigned long val;
69 int cpu = smp_processor_id();
70
71 apic_write(APIC_DFR, APIC_DFR_FLAT);
72 val = calculate_ldr(cpu);
73 apic_write(APIC_LDR, val);
74}
75
76static void bigsmp_setup_apic_routing(void)
77{
78 printk(KERN_INFO
79 "Enabling APIC mode: Physflat. Using %d I/O APICs\n",
80 nr_ioapics);
81}
82
83static int bigsmp_apicid_to_node(int logical_apicid)
84{
85 return apicid_2_node[hard_smp_processor_id()];
86}
87
88static int bigsmp_cpu_present_to_apicid(int mps_cpu)
89{
90 if (mps_cpu < nr_cpu_ids)
91 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
92
93 return BAD_APICID;
94}
95
96static physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
97{
98 return physid_mask_of_physid(phys_apicid);
99}
100
101/* Mapping from cpu number to logical apicid */
102static inline int bigsmp_cpu_to_logical_apicid(int cpu)
103{
104 if (cpu >= nr_cpu_ids)
105 return BAD_APICID;
106 return cpu_physical_id(cpu);
107}
108
109static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
110{
111 /* For clustered we don't have a good way to do this yet - hack */
112 return physids_promote(0xFFL);
113}
114
115static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
116{
117 return 1;
118}
119
120/* As we are using a single CPU as the destination, pick only one CPU here */
121static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
122{
123 return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
124}
125
126static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
127 const struct cpumask *andmask)
128{
129 int cpu;
130
131 /*
132 * We're using fixed IRQ delivery, can only return one phys APIC ID.
133 * May as well be the first.
134 */
135 for_each_cpu_and(cpu, cpumask, andmask) {
136 if (cpumask_test_cpu(cpu, cpu_online_mask))
137 break;
138 }
139 if (cpu < nr_cpu_ids)
140 return bigsmp_cpu_to_logical_apicid(cpu);
141
142 return BAD_APICID;
143}
144
145static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
146{
147 return cpuid_apic >> index_msb;
148}
149
150static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
151{
152 default_send_IPI_mask_sequence_phys(mask, vector);
153}
154
155static void bigsmp_send_IPI_allbutself(int vector)
156{
157 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
158}
159
160static void bigsmp_send_IPI_all(int vector)
161{
162 bigsmp_send_IPI_mask(cpu_online_mask, vector);
163}
164
165static int dmi_bigsmp; /* can be set by dmi scanners */
166
167static int hp_ht_bigsmp(const struct dmi_system_id *d)
168{
169 printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
170 dmi_bigsmp = 1;
171
172 return 0;
173}
174
175
176static const struct dmi_system_id bigsmp_dmi_table[] = {
177 { hp_ht_bigsmp, "HP ProLiant DL760 G2",
178 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
179 DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
180 }
181 },
182
183 { hp_ht_bigsmp, "HP ProLiant DL740",
184 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
185 DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
186 }
187 },
188 { } /* NULL entry stops DMI scanning */
189};
190
191static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
192{
193 cpus_clear(*retmask);
194 cpu_set(cpu, *retmask);
195}
196
197static int probe_bigsmp(void)
198{
199 if (def_to_bigsmp)
200 dmi_bigsmp = 1;
201 else
202 dmi_check_system(bigsmp_dmi_table);
203
204 return dmi_bigsmp;
205}
206
207struct apic apic_bigsmp = {
208
209 .name = "bigsmp",
210 .probe = probe_bigsmp,
211 .acpi_madt_oem_check = NULL,
212 .apic_id_registered = bigsmp_apic_id_registered,
213
214 .irq_delivery_mode = dest_Fixed,
215 /* phys delivery to target CPU: */
216 .irq_dest_mode = 0,
217
218 .target_cpus = bigsmp_target_cpus,
219 .disable_esr = 1,
220 .dest_logical = 0,
221 .check_apicid_used = bigsmp_check_apicid_used,
222 .check_apicid_present = bigsmp_check_apicid_present,
223
224 .vector_allocation_domain = bigsmp_vector_allocation_domain,
225 .init_apic_ldr = bigsmp_init_apic_ldr,
226
227 .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
228 .setup_apic_routing = bigsmp_setup_apic_routing,
229 .multi_timer_check = NULL,
230 .apicid_to_node = bigsmp_apicid_to_node,
231 .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid,
232 .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
233 .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present,
234 .setup_portio_remap = NULL,
235 .check_phys_apicid_present = bigsmp_check_phys_apicid_present,
236 .enable_apic_mode = NULL,
237 .phys_pkg_id = bigsmp_phys_pkg_id,
238 .mps_oem_check = NULL,
239
240 .get_apic_id = bigsmp_get_apic_id,
241 .set_apic_id = NULL,
242 .apic_id_mask = 0xFF << 24,
243
244 .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid,
245 .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and,
246
247 .send_IPI_mask = bigsmp_send_IPI_mask,
248 .send_IPI_mask_allbutself = NULL,
249 .send_IPI_allbutself = bigsmp_send_IPI_allbutself,
250 .send_IPI_all = bigsmp_send_IPI_all,
251 .send_IPI_self = default_send_IPI_self,
252
253 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
254 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
255
256 .wait_for_init_deassert = default_wait_for_init_deassert,
257
258 .smp_callin_clear_local_apic = NULL,
259 .inquire_remote_apic = default_inquire_remote_apic,
260
261 .read = native_apic_mem_read,
262 .write = native_apic_mem_write,
263 .icr_read = native_apic_icr_read,
264 .icr_write = native_apic_icr_write,
265 .wait_icr_idle = native_apic_wait_icr_idle,
266 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
267};
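How a driver like apic_bigsmp gets selected: on 32-bit the generic probe
code walks the compiled-in drivers and installs the first one whose
->probe() hook returns nonzero (probe_32.c in the diffstat). A hedged
sketch of that loop, assuming apic_probe[] is the NULL-terminated table
of driver pointers:

/*
 * Illustrative only; the real loop lives in
 * arch/x86/kernel/apic/probe_32.c.
 */
static void __init sketch_probe_apic(void)
{
        int i;

        for (i = 0; apic_probe[i]; i++) {
                if (apic_probe[i]->probe && apic_probe[i]->probe()) {
                        apic = apic_probe[i];
                        printk(KERN_INFO "Switched to APIC driver '%s'.\n",
                               apic->name);
                        break;
                }
        }
}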
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
new file mode 100644
index 00000000000..19588f2770e
--- /dev/null
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -0,0 +1,780 @@
1/*
2 * Written by: Garry Forsgren, Unisys Corporation
3 * Natalie Protasevich, Unisys Corporation
4 *
5 * This file contains the code to configure and interface
6 * with Unisys ES7000 series hardware system manager.
7 *
8 * Copyright (c) 2003 Unisys Corporation.
9 * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar
10 *
11 * All Rights Reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it would be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with this program; if not, write the Free Software Foundation, Inc., 59
23 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
24 *
25 * Contact information: Unisys Corporation, Township Line & Union Meeting
26 * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or:
27 *
28 * http://www.unisys.com
29 */
30#include <linux/notifier.h>
31#include <linux/spinlock.h>
32#include <linux/cpumask.h>
33#include <linux/threads.h>
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/reboot.h>
37#include <linux/string.h>
38#include <linux/types.h>
39#include <linux/errno.h>
40#include <linux/acpi.h>
41#include <linux/init.h>
42#include <linux/nmi.h>
43#include <linux/smp.h>
44#include <linux/io.h>
45
46#include <asm/apicdef.h>
47#include <asm/atomic.h>
48#include <asm/fixmap.h>
49#include <asm/mpspec.h>
50#include <asm/setup.h>
51#include <asm/apic.h>
52#include <asm/ipi.h>
53
54/*
55 * ES7000 chipsets
56 */
57
58#define NON_UNISYS 0
59#define ES7000_CLASSIC 1
60#define ES7000_ZORRO 2
61
62#define MIP_REG 1
63#define MIP_PSAI_REG 4
64
65#define MIP_BUSY 1
66#define MIP_SPIN 0xf0000
67#define MIP_VALID 0x0100000000000000ULL
68#define MIP_SW_APIC 0x1020b
69
70#define MIP_PORT(val) ((val >> 32) & 0xffff)
71
72#define MIP_RD_LO(val) (val & 0xffffffff)
73
74struct mip_reg {
75 unsigned long long off_0x00;
76 unsigned long long off_0x08;
77 unsigned long long off_0x10;
78 unsigned long long off_0x18;
79 unsigned long long off_0x20;
80 unsigned long long off_0x28;
81 unsigned long long off_0x30;
82 unsigned long long off_0x38;
83};
84
85struct mip_reg_info {
86 unsigned long long mip_info;
87 unsigned long long delivery_info;
88 unsigned long long host_reg;
89 unsigned long long mip_reg;
90};
91
92struct psai {
93 unsigned long long entry_type;
94 unsigned long long addr;
95 unsigned long long bep_addr;
96};
97
98#ifdef CONFIG_ACPI
99
100struct es7000_oem_table {
101 struct acpi_table_header Header;
102 u32 OEMTableAddr;
103 u32 OEMTableSize;
104};
105
106static unsigned long oem_addrX;
107static unsigned long oem_size;
108
109#endif
110
111/*
112 * ES7000 Globals
113 */
114
115static volatile unsigned long *psai;
116static struct mip_reg *mip_reg;
117static struct mip_reg *host_reg;
118static int mip_port;
119static unsigned long mip_addr;
120static unsigned long host_addr;
121
122int es7000_plat;
123
124/*
125 * GSI override for ES7000 platforms.
126 */
127
128static unsigned int base;
129
130static int
131es7000_rename_gsi(int ioapic, int gsi)
132{
133 if (es7000_plat == ES7000_ZORRO)
134 return gsi;
135
136 if (!base) {
137 int i;
138 for (i = 0; i < nr_ioapics; i++)
139 base += nr_ioapic_registers[i];
140 }
141
142 if (!ioapic && (gsi < 16))
143 gsi += base;
144
145 return gsi;
146}
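A worked example of the renumbering above, assuming a Classic ES7000
(es7000_plat != ES7000_ZORRO) with two 24-pin I/O APICs:

/*
 * base = 24 + 24 = 48, so legacy pins on I/O APIC 0 shift up:
 *
 *   es7000_rename_gsi(0, 3)  -> 51   (ISA pin, remapped past base)
 *   es7000_rename_gsi(0, 16) -> 16   (gsi >= 16, unchanged)
 *   es7000_rename_gsi(1, 5)  -> 5    (not I/O APIC 0, unchanged)
 */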
147
148static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
149{
150 unsigned long vect = 0, psaival = 0;
151
152 if (psai == NULL)
153 return -1;
154
155 vect = ((unsigned long)__pa(eip)/0x1000) << 16;
156 psaival = (0x1000000 | vect | cpu);
157
158 while (*psai & 0x1000000)
159 ;
160
161 *psai = psaival;
162
163 return 0;
164}
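The mailbox word assembled above packs three fields; a sketch of the
layout as this function computes it (field names are descriptive, not
from the source):

/*
 * psaival bit layout in wakeup_secondary_cpu_via_mip():
 *
 *   bit  24      : busy flag (0x1000000), polled until clear above
 *   bits 16..    : startup page number, __pa(eip) / 0x1000
 *   bits 0..15   : target cpu number
 *
 * E.g. for __pa(eip) == 0x9f000 and cpu == 3:
 *   psaival = 0x1000000 | (0x9f << 16) | 3 = 0x19f0003
 */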
165
166static int es7000_apic_is_cluster(void)
167{
168 /* MPENTIUMIII */
169 if (boot_cpu_data.x86 == 6 &&
170 (boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11))
171 return 1;
172
173 return 0;
174}
175
176static void setup_unisys(void)
177{
178 /*
179 * Determine the generation of the ES7000 currently running.
180 *
181 * es7000_plat = 1 if the machine is a 5xx ES7000 box
182 * es7000_plat = 2 if the machine is an x86_64 ES7000 box
183 *
184 */
185 if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2))
186 es7000_plat = ES7000_ZORRO;
187 else
188 es7000_plat = ES7000_CLASSIC;
189 ioapic_renumber_irq = es7000_rename_gsi;
190}
191
192/*
193 * Parse the OEM Table:
194 */
195static int parse_unisys_oem(char *oemptr)
196{
197 int i;
198 int success = 0;
199 unsigned char type, size;
200 unsigned long val;
201 char *tp = NULL;
202 struct psai *psaip = NULL;
203 struct mip_reg_info *mi;
204 struct mip_reg *host, *mip;
205
206 tp = oemptr;
207
208 tp += 8;
209
210 for (i = 0; i <= 6; i++) {
211 type = *tp++;
212 size = *tp++;
213 tp -= 2;
214 switch (type) {
215 case MIP_REG:
216 mi = (struct mip_reg_info *)tp;
217 val = MIP_RD_LO(mi->host_reg);
218 host_addr = val;
219 host = (struct mip_reg *)val;
220 host_reg = __va(host);
221 val = MIP_RD_LO(mi->mip_reg);
222 mip_port = MIP_PORT(mi->mip_info);
223 mip_addr = val;
224 mip = (struct mip_reg *)val;
225 mip_reg = __va(mip);
226 pr_debug("es7000_mipcfg: host_reg = 0x%lx\n",
227 (unsigned long)host_reg);
228 pr_debug("es7000_mipcfg: mip_reg = 0x%lx\n",
229 (unsigned long)mip_reg);
230 success++;
231 break;
232 case MIP_PSAI_REG:
233 psaip = (struct psai *)tp;
234 if (tp != NULL) {
235 if (psaip->addr)
236 psai = __va(psaip->addr);
237 else
238 psai = NULL;
239 success++;
240 }
241 break;
242 default:
243 break;
244 }
245 tp += size;
246 }
247
248 if (success < 2)
249 es7000_plat = NON_UNISYS;
250 else
251 setup_unisys();
252
253 return es7000_plat;
254}
255
256#ifdef CONFIG_ACPI
257static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
258{
259 struct acpi_table_header *header = NULL;
260 struct es7000_oem_table *table;
261 acpi_size tbl_size;
262 acpi_status ret;
263 int i = 0;
264
265 for (;;) {
266 ret = acpi_get_table_with_size("OEM1", i++, &header, &tbl_size);
267 if (!ACPI_SUCCESS(ret))
268 return -1;
269
270 if (!memcmp((char *) &header->oem_id, "UNISYS", 6))
271 break;
272
273 early_acpi_os_unmap_memory(header, tbl_size);
274 }
275
276 table = (void *)header;
277
278 oem_addrX = table->OEMTableAddr;
279 oem_size = table->OEMTableSize;
280
281 early_acpi_os_unmap_memory(header, tbl_size);
282
283 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, oem_size);
284
285 return 0;
286}
287
288static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
289{
290 if (!oem_addr)
291 return;
292
293 __acpi_unmap_table((char *)oem_addr, oem_size);
294}
295
296static int es7000_check_dsdt(void)
297{
298 struct acpi_table_header header;
299
300 if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
301 !strncmp(header.oem_id, "UNISYS", 6))
302 return 1;
303 return 0;
304}
305
306static int es7000_acpi_ret;
307
308/* Hook from generic ACPI tables.c */
309static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
310{
311 unsigned long oem_addr = 0;
312 int check_dsdt;
313 int ret = 0;
314
315 /* check the DSDT first to avoid clearing the fixmap for oem_addr */
316 check_dsdt = es7000_check_dsdt();
317
318 if (!find_unisys_acpi_oem_table(&oem_addr)) {
319 if (check_dsdt) {
320 ret = parse_unisys_oem((char *)oem_addr);
321 } else {
322 setup_unisys();
323 ret = 1;
324 }
325 /*
326 * we need to unmap it
327 */
328 unmap_unisys_acpi_oem_table(oem_addr);
329 }
330
331 es7000_acpi_ret = ret;
332
333 return ret && !es7000_apic_is_cluster();
334}
335
336static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
337{
338 int ret = es7000_acpi_ret;
339
340 return ret && es7000_apic_is_cluster();
341}
342
343#else /* !CONFIG_ACPI: */
344static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
345{
346 return 0;
347}
348
349static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
350{
351 return 0;
352}
353#endif /* !CONFIG_ACPI */
354
355static void es7000_spin(int n)
356{
357 int i = 0;
358
359 while (i++ < n)
360 rep_nop();
361}
362
363static int es7000_mip_write(struct mip_reg *mip_reg)
364{
365 int status = 0;
366 int spin;
367
368 spin = MIP_SPIN;
369 while ((host_reg->off_0x38 & MIP_VALID) != 0) {
370 if (--spin <= 0) {
371 WARN(1, "Timeout waiting for Host Valid Flag\n");
372 return -1;
373 }
374 es7000_spin(MIP_SPIN);
375 }
376
377 memcpy(host_reg, mip_reg, sizeof(struct mip_reg));
378 outb(1, mip_port);
379
380 spin = MIP_SPIN;
381
382 while ((mip_reg->off_0x38 & MIP_VALID) == 0) {
383 if (--spin <= 0) {
384 WARN(1, "Timeout waiting for MIP Valid Flag\n");
385 return -1;
386 }
387 es7000_spin(MIP_SPIN);
388 }
389
390 status = (mip_reg->off_0x00 & 0xffff0000000000ULL) >> 48;
391 mip_reg->off_0x38 &= ~MIP_VALID;
392
393 return status;
394}
395
396static void es7000_enable_apic_mode(void)
397{
398 struct mip_reg es7000_mip_reg;
399 int mip_status;
400
401 if (!es7000_plat)
402 return;
403
404 printk(KERN_INFO "ES7000: Enabling APIC mode.\n");
405 memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
406 es7000_mip_reg.off_0x00 = MIP_SW_APIC;
407 es7000_mip_reg.off_0x38 = MIP_VALID;
408
409 while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
410 WARN(1, "Command failed, status = %x\n", mip_status);
411}
412
413static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
414{
415 /* Careful. Some cpus do not strictly honor the set of cpus
416 * specified in the interrupt destination when using lowest
417 * priority interrupt delivery mode.
418 *
419 * In particular there was a hyperthreading cpu observed to
420 * deliver interrupts to the wrong hyperthread when only one
421 * hyperthread was specified in the interrupt destination.
422 */
423 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
424}
425
426
427static void es7000_wait_for_init_deassert(atomic_t *deassert)
428{
429 while (!atomic_read(deassert))
430 cpu_relax();
431}
432
433static unsigned int es7000_get_apic_id(unsigned long x)
434{
435 return (x >> 24) & 0xFF;
436}
437
438static void es7000_send_IPI_mask(const struct cpumask *mask, int vector)
439{
440 default_send_IPI_mask_sequence_phys(mask, vector);
441}
442
443static void es7000_send_IPI_allbutself(int vector)
444{
445 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
446}
447
448static void es7000_send_IPI_all(int vector)
449{
450 es7000_send_IPI_mask(cpu_online_mask, vector);
451}
452
453static int es7000_apic_id_registered(void)
454{
455 return 1;
456}
457
458static const cpumask_t *target_cpus_cluster(void)
459{
460 return &CPU_MASK_ALL;
461}
462
463static const cpumask_t *es7000_target_cpus(void)
464{
465 return &cpumask_of_cpu(smp_processor_id());
466}
467
468static unsigned long
469es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
470{
471 return 0;
472}
473static unsigned long es7000_check_apicid_present(int bit)
474{
475 return physid_isset(bit, phys_cpu_present_map);
476}
477
478static unsigned long calculate_ldr(int cpu)
479{
480 unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu);
481
482 return SET_APIC_LOGICAL_ID(id);
483}
484
485/*
486 * Set up the logical destination ID.
487 *
488 * Intel recommends setting DFR, LDR and TPR before enabling
489 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
490 * document number 292116). So here it goes...
491 */
492static void es7000_init_apic_ldr_cluster(void)
493{
494 unsigned long val;
495 int cpu = smp_processor_id();
496
497 apic_write(APIC_DFR, APIC_DFR_CLUSTER);
498 val = calculate_ldr(cpu);
499 apic_write(APIC_LDR, val);
500}
501
502static void es7000_init_apic_ldr(void)
503{
504 unsigned long val;
505 int cpu = smp_processor_id();
506
507 apic_write(APIC_DFR, APIC_DFR_FLAT);
508 val = calculate_ldr(cpu);
509 apic_write(APIC_LDR, val);
510}
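The only difference between the two init_apic_ldr paths is the
destination format programmed into APIC_DFR. A comment sketch of the
xAPIC logical-destination encodings assumed here (see the Intel SDM for
the authoritative layout):

/*
 *   APIC_DFR_FLAT:    LDR[31:24] is an 8-bit bitmask, one bit per
 *                     CPU, so at most 8 logical destinations.
 *   APIC_DFR_CLUSTER: LDR[31:28] is a cluster ID and LDR[27:24] a
 *                     4-bit mask of agents within that cluster.
 */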
511
512static void es7000_setup_apic_routing(void)
513{
514 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
515
516 printk(KERN_INFO
517 "Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
518 (apic_version[apic] == 0x14) ?
519 "Physical Cluster" : "Logical Cluster",
520 nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
521}
522
523static int es7000_apicid_to_node(int logical_apicid)
524{
525 return 0;
526}
527
528
529static int es7000_cpu_present_to_apicid(int mps_cpu)
530{
531 if (!mps_cpu)
532 return boot_cpu_physical_apicid;
533 else if (mps_cpu < nr_cpu_ids)
534 return per_cpu(x86_bios_cpu_apicid, mps_cpu);
535 else
536 return BAD_APICID;
537}
538
539static int cpu_id;
540
541static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid)
542{
543 physid_mask_t mask;
544
545 mask = physid_mask_of_physid(cpu_id);
546 ++cpu_id;
547
548 return mask;
549}
550
551/* Mapping from cpu number to logical apicid */
552static int es7000_cpu_to_logical_apicid(int cpu)
553{
554#ifdef CONFIG_SMP
555 if (cpu >= nr_cpu_ids)
556 return BAD_APICID;
557 return cpu_2_logical_apicid[cpu];
558#else
559 return logical_smp_processor_id();
560#endif
561}
562
563static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map)
564{
565 /* For clustered we don't have a good way to do this yet - hack */
566 return physids_promote(0xff);
567}
568
569static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
570{
571 boot_cpu_physical_apicid = read_apic_id();
572 return 1;
573}
574
575static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
576{
577 unsigned int round = 0;
578 int cpu, uninitialized_var(apicid);
579
580 /*
581 * The cpus in the mask must all be in the same APIC cluster.
582 */
583 for_each_cpu(cpu, cpumask) {
584 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
585
586 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
587 WARN(1, "Not a valid mask!");
588
589 return BAD_APICID;
590 }
591 apicid = new_apicid;
592 round++;
593 }
594 return apicid;
595}
596
597static unsigned int
598es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
599 const struct cpumask *andmask)
600{
601 int apicid = es7000_cpu_to_logical_apicid(0);
602 cpumask_var_t cpumask;
603
604 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
605 return apicid;
606
607 cpumask_and(cpumask, inmask, andmask);
608 cpumask_and(cpumask, cpumask, cpu_online_mask);
609 apicid = es7000_cpu_mask_to_apicid(cpumask);
610
611 free_cpumask_var(cpumask);
612
613 return apicid;
614}
615
616static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
617{
618 return cpuid_apic >> index_msb;
619}
620
621static int probe_es7000(void)
622{
623 /* probed later in mptable/ACPI hooks */
624 return 0;
625}
626
627static int es7000_mps_ret;
628static int es7000_mps_oem_check(struct mpc_table *mpc, char *oem,
629 char *productid)
630{
631 int ret = 0;
632
633 if (mpc->oemptr) {
634 struct mpc_oemtable *oem_table =
635 (struct mpc_oemtable *)mpc->oemptr;
636
637 if (!strncmp(oem, "UNISYS", 6))
638 ret = parse_unisys_oem((char *)oem_table);
639 }
640
641 es7000_mps_ret = ret;
642
643 return ret && !es7000_apic_is_cluster();
644}
645
646static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
647 char *productid)
648{
649 int ret = es7000_mps_ret;
650
651 return ret && es7000_apic_is_cluster();
652}
653
654struct apic apic_es7000_cluster = {
655
656 .name = "es7000",
657 .probe = probe_es7000,
658 .acpi_madt_oem_check = es7000_acpi_madt_oem_check_cluster,
659 .apic_id_registered = es7000_apic_id_registered,
660
661 .irq_delivery_mode = dest_LowestPrio,
662 /* logical delivery broadcast to all procs: */
663 .irq_dest_mode = 1,
664
665 .target_cpus = target_cpus_cluster,
666 .disable_esr = 1,
667 .dest_logical = 0,
668 .check_apicid_used = es7000_check_apicid_used,
669 .check_apicid_present = es7000_check_apicid_present,
670
671 .vector_allocation_domain = es7000_vector_allocation_domain,
672 .init_apic_ldr = es7000_init_apic_ldr_cluster,
673
674 .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
675 .setup_apic_routing = es7000_setup_apic_routing,
676 .multi_timer_check = NULL,
677 .apicid_to_node = es7000_apicid_to_node,
678 .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
679 .cpu_present_to_apicid = es7000_cpu_present_to_apicid,
680 .apicid_to_cpu_present = es7000_apicid_to_cpu_present,
681 .setup_portio_remap = NULL,
682 .check_phys_apicid_present = es7000_check_phys_apicid_present,
683 .enable_apic_mode = es7000_enable_apic_mode,
684 .phys_pkg_id = es7000_phys_pkg_id,
685 .mps_oem_check = es7000_mps_oem_check_cluster,
686
687 .get_apic_id = es7000_get_apic_id,
688 .set_apic_id = NULL,
689 .apic_id_mask = 0xFF << 24,
690
691 .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
692 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
693
694 .send_IPI_mask = es7000_send_IPI_mask,
695 .send_IPI_mask_allbutself = NULL,
696 .send_IPI_allbutself = es7000_send_IPI_allbutself,
697 .send_IPI_all = es7000_send_IPI_all,
698 .send_IPI_self = default_send_IPI_self,
699
700 .wakeup_secondary_cpu = wakeup_secondary_cpu_via_mip,
701
702 .trampoline_phys_low = 0x467,
703 .trampoline_phys_high = 0x469,
704
705 .wait_for_init_deassert = NULL,
706
707 /* Nothing to do for most platforms, since cleared by the INIT cycle: */
708 .smp_callin_clear_local_apic = NULL,
709 .inquire_remote_apic = default_inquire_remote_apic,
710
711 .read = native_apic_mem_read,
712 .write = native_apic_mem_write,
713 .icr_read = native_apic_icr_read,
714 .icr_write = native_apic_icr_write,
715 .wait_icr_idle = native_apic_wait_icr_idle,
716 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
717};
718
719struct apic apic_es7000 = {
720
721 .name = "es7000",
722 .probe = probe_es7000,
723 .acpi_madt_oem_check = es7000_acpi_madt_oem_check,
724 .apic_id_registered = es7000_apic_id_registered,
725
726 .irq_delivery_mode = dest_Fixed,
727 /* phys delivery to target CPUs: */
728 .irq_dest_mode = 0,
729
730 .target_cpus = es7000_target_cpus,
731 .disable_esr = 1,
732 .dest_logical = 0,
733 .check_apicid_used = es7000_check_apicid_used,
734 .check_apicid_present = es7000_check_apicid_present,
735
736 .vector_allocation_domain = es7000_vector_allocation_domain,
737 .init_apic_ldr = es7000_init_apic_ldr,
738
739 .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
740 .setup_apic_routing = es7000_setup_apic_routing,
741 .multi_timer_check = NULL,
742 .apicid_to_node = es7000_apicid_to_node,
743 .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
744 .cpu_present_to_apicid = es7000_cpu_present_to_apicid,
745 .apicid_to_cpu_present = es7000_apicid_to_cpu_present,
746 .setup_portio_remap = NULL,
747 .check_phys_apicid_present = es7000_check_phys_apicid_present,
748 .enable_apic_mode = es7000_enable_apic_mode,
749 .phys_pkg_id = es7000_phys_pkg_id,
750 .mps_oem_check = es7000_mps_oem_check,
751
752 .get_apic_id = es7000_get_apic_id,
753 .set_apic_id = NULL,
754 .apic_id_mask = 0xFF << 24,
755
756 .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
757 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
758
759 .send_IPI_mask = es7000_send_IPI_mask,
760 .send_IPI_mask_allbutself = NULL,
761 .send_IPI_allbutself = es7000_send_IPI_allbutself,
762 .send_IPI_all = es7000_send_IPI_all,
763 .send_IPI_self = default_send_IPI_self,
764
765 .trampoline_phys_low = 0x467,
766 .trampoline_phys_high = 0x469,
767
768 .wait_for_init_deassert = es7000_wait_for_init_deassert,
769
770 /* Nothing to do for most platforms, since cleared by the INIT cycle: */
771 .smp_callin_clear_local_apic = NULL,
772 .inquire_remote_apic = default_inquire_remote_apic,
773
774 .read = native_apic_mem_read,
775 .write = native_apic_mem_write,
776 .icr_read = native_apic_icr_read,
777 .icr_write = native_apic_icr_write,
778 .wait_icr_idle = native_apic_wait_icr_idle,
779 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
780};
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index bc7ac4da90d..00e6071cefc 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Intel IO-APIC support for multi-Pentium hosts. 2 * Intel IO-APIC support for multi-Pentium hosts.
3 * 3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo 4 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
5 * 5 *
6 * Many thanks to Stig Venaas for trying out countless experimental 6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently! 7 * patches and reporting/debugging problems patiently!
@@ -46,6 +46,7 @@
46#include <asm/idle.h> 46#include <asm/idle.h>
47#include <asm/io.h> 47#include <asm/io.h>
48#include <asm/smp.h> 48#include <asm/smp.h>
49#include <asm/cpu.h>
49#include <asm/desc.h> 50#include <asm/desc.h>
50#include <asm/proto.h> 51#include <asm/proto.h>
51#include <asm/acpi.h> 52#include <asm/acpi.h>
@@ -61,9 +62,7 @@
61#include <asm/uv/uv_hub.h> 62#include <asm/uv/uv_hub.h>
62#include <asm/uv/uv_irq.h> 63#include <asm/uv/uv_irq.h>
63 64
64#include <mach_ipi.h> 65#include <asm/apic.h>
65#include <mach_apic.h>
66#include <mach_apicdef.h>
67 66
68#define __apicdebuginit(type) static type __init 67#define __apicdebuginit(type) static type __init
69 68
@@ -82,11 +81,11 @@ static DEFINE_SPINLOCK(vector_lock);
82int nr_ioapic_registers[MAX_IO_APICS]; 81int nr_ioapic_registers[MAX_IO_APICS];
83 82
84/* I/O APIC entries */ 83/* I/O APIC entries */
85struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; 84struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
86int nr_ioapics; 85int nr_ioapics;
87 86
88/* MP IRQ source entries */ 87/* MP IRQ source entries */
89struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; 88struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
90 89
91/* # of MP IRQ source entries */ 90/* # of MP IRQ source entries */
92int mp_irq_entries; 91int mp_irq_entries;
@@ -99,10 +98,19 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
99 98
100int skip_ioapic_setup; 99int skip_ioapic_setup;
101 100
101void arch_disable_smp_support(void)
102{
103#ifdef CONFIG_PCI
104 noioapicquirk = 1;
105 noioapicreroute = -1;
106#endif
107 skip_ioapic_setup = 1;
108}
109
102static int __init parse_noapic(char *str) 110static int __init parse_noapic(char *str)
103{ 111{
104 /* disable IO-APIC */ 112 /* disable IO-APIC */
105 disable_ioapic_setup(); 113 arch_disable_smp_support();
106 return 0; 114 return 0;
107} 115}
108early_param("noapic", parse_noapic); 116early_param("noapic", parse_noapic);
@@ -356,7 +364,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
356 364
357 if (!cfg->move_in_progress) { 365 if (!cfg->move_in_progress) {
358 /* it means that domain is not changed */ 366 /* it means that domain is not changed */
359 if (!cpumask_intersects(&desc->affinity, mask)) 367 if (!cpumask_intersects(desc->affinity, mask))
360 cfg->move_desc_pending = 1; 368 cfg->move_desc_pending = 1;
361 } 369 }
362} 370}
@@ -386,7 +394,7 @@ struct io_apic {
386static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) 394static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
387{ 395{
388 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) 396 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
389 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK); 397 + (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
390} 398}
391 399
392static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) 400static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
@@ -478,7 +486,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
478 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 486 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
479} 487}
480 488
481static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 489void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
482{ 490{
483 unsigned long flags; 491 unsigned long flags;
484 spin_lock_irqsave(&ioapic_lock, flags); 492 spin_lock_irqsave(&ioapic_lock, flags);
@@ -513,11 +521,11 @@ static void send_cleanup_vector(struct irq_cfg *cfg)
513 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 521 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
514 cfg->move_cleanup_count++; 522 cfg->move_cleanup_count++;
515 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 523 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
516 send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); 524 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
517 } else { 525 } else {
518 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); 526 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
519 cfg->move_cleanup_count = cpumask_weight(cleanup_mask); 527 cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
520 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 528 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
521 free_cpumask_var(cleanup_mask); 529 free_cpumask_var(cleanup_mask);
522 } 530 }
523 cfg->move_in_progress = 0; 531 cfg->move_in_progress = 0;
@@ -562,8 +570,9 @@ static int
562assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); 570assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
563 571
564/* 572/*
565 * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid 573 * Either sets desc->affinity to a valid value, and returns
566 * of that, or returns BAD_APICID and leaves desc->affinity untouched. 574 * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
575 * leaves desc->affinity untouched.
567 */ 576 */
568static unsigned int 577static unsigned int
569set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) 578set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
@@ -579,9 +588,10 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
579 if (assign_irq_vector(irq, cfg, mask)) 588 if (assign_irq_vector(irq, cfg, mask))
580 return BAD_APICID; 589 return BAD_APICID;
581 590
582 cpumask_and(&desc->affinity, cfg->domain, mask); 591 cpumask_and(desc->affinity, cfg->domain, mask);
583 set_extra_move_desc(desc, mask); 592 set_extra_move_desc(desc, mask);
584 return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask); 593
594 return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
585} 595}
586 596
587static void 597static void
@@ -796,23 +806,6 @@ static void clear_IO_APIC (void)
796 clear_IO_APIC_pin(apic, pin); 806 clear_IO_APIC_pin(apic, pin);
797} 807}
798 808
799#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
800void send_IPI_self(int vector)
801{
802 unsigned int cfg;
803
804 /*
805 * Wait for idle.
806 */
807 apic_wait_icr_idle();
808 cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
809 /*
810 * Send the IPI. The write to APIC_ICR fires this off.
811 */
812 apic_write(APIC_ICR, cfg);
813}
814#endif /* !CONFIG_SMP && CONFIG_X86_32*/
815
816#ifdef CONFIG_X86_32 809#ifdef CONFIG_X86_32
817/* 810/*
818 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to 811 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
@@ -820,8 +813,9 @@ void send_IPI_self(int vector)
820 */ 813 */
821 814
822#define MAX_PIRQS 8 815#define MAX_PIRQS 8
823static int pirq_entries [MAX_PIRQS]; 816static int pirq_entries[MAX_PIRQS] = {
824static int pirqs_enabled; 817 [0 ... MAX_PIRQS - 1] = -1
818};
825 819
826static int __init ioapic_pirq_setup(char *str) 820static int __init ioapic_pirq_setup(char *str)
827{ 821{
@@ -830,10 +824,6 @@ static int __init ioapic_pirq_setup(char *str)
830 824
831 get_options(str, ARRAY_SIZE(ints), ints); 825 get_options(str, ARRAY_SIZE(ints), ints);
832 826
833 for (i = 0; i < MAX_PIRQS; i++)
834 pirq_entries[i] = -1;
835
836 pirqs_enabled = 1;
837 apic_printk(APIC_VERBOSE, KERN_INFO 827 apic_printk(APIC_VERBOSE, KERN_INFO
838 "PIRQ redirection, working around broken MP-BIOS.\n"); 828 "PIRQ redirection, working around broken MP-BIOS.\n");
839 max = MAX_PIRQS; 829 max = MAX_PIRQS;
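The replacement initializer above uses the GNU C range-designator
extension so the table starts out as "no entry" at compile time, which
is why both the runtime -1 loop and the write-only pirqs_enabled flag
could be deleted. A standalone sketch of the idiom:

/* [a ... b] = v sets every element in the inclusive range: */
static int example_entries[8] = {
        [0 ... 7] = -1,
};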
@@ -944,10 +934,10 @@ static int find_irq_entry(int apic, int pin, int type)
944 int i; 934 int i;
945 935
946 for (i = 0; i < mp_irq_entries; i++) 936 for (i = 0; i < mp_irq_entries; i++)
947 if (mp_irqs[i].mp_irqtype == type && 937 if (mp_irqs[i].irqtype == type &&
948 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid || 938 (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
949 mp_irqs[i].mp_dstapic == MP_APIC_ALL) && 939 mp_irqs[i].dstapic == MP_APIC_ALL) &&
950 mp_irqs[i].mp_dstirq == pin) 940 mp_irqs[i].dstirq == pin)
951 return i; 941 return i;
952 942
953 return -1; 943 return -1;
@@ -961,13 +951,13 @@ static int __init find_isa_irq_pin(int irq, int type)
961 int i; 951 int i;
962 952
963 for (i = 0; i < mp_irq_entries; i++) { 953 for (i = 0; i < mp_irq_entries; i++) {
964 int lbus = mp_irqs[i].mp_srcbus; 954 int lbus = mp_irqs[i].srcbus;
965 955
966 if (test_bit(lbus, mp_bus_not_pci) && 956 if (test_bit(lbus, mp_bus_not_pci) &&
967 (mp_irqs[i].mp_irqtype == type) && 957 (mp_irqs[i].irqtype == type) &&
968 (mp_irqs[i].mp_srcbusirq == irq)) 958 (mp_irqs[i].srcbusirq == irq))
969 959
970 return mp_irqs[i].mp_dstirq; 960 return mp_irqs[i].dstirq;
971 } 961 }
972 return -1; 962 return -1;
973} 963}
@@ -977,17 +967,17 @@ static int __init find_isa_irq_apic(int irq, int type)
977 int i; 967 int i;
978 968
979 for (i = 0; i < mp_irq_entries; i++) { 969 for (i = 0; i < mp_irq_entries; i++) {
980 int lbus = mp_irqs[i].mp_srcbus; 970 int lbus = mp_irqs[i].srcbus;
981 971
982 if (test_bit(lbus, mp_bus_not_pci) && 972 if (test_bit(lbus, mp_bus_not_pci) &&
983 (mp_irqs[i].mp_irqtype == type) && 973 (mp_irqs[i].irqtype == type) &&
984 (mp_irqs[i].mp_srcbusirq == irq)) 974 (mp_irqs[i].srcbusirq == irq))
985 break; 975 break;
986 } 976 }
987 if (i < mp_irq_entries) { 977 if (i < mp_irq_entries) {
988 int apic; 978 int apic;
989 for(apic = 0; apic < nr_ioapics; apic++) { 979 for(apic = 0; apic < nr_ioapics; apic++) {
990 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic) 980 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
991 return apic; 981 return apic;
992 } 982 }
993 } 983 }
@@ -1012,23 +1002,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
1012 return -1; 1002 return -1;
1013 } 1003 }
1014 for (i = 0; i < mp_irq_entries; i++) { 1004 for (i = 0; i < mp_irq_entries; i++) {
1015 int lbus = mp_irqs[i].mp_srcbus; 1005 int lbus = mp_irqs[i].srcbus;
1016 1006
1017 for (apic = 0; apic < nr_ioapics; apic++) 1007 for (apic = 0; apic < nr_ioapics; apic++)
1018 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic || 1008 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
1019 mp_irqs[i].mp_dstapic == MP_APIC_ALL) 1009 mp_irqs[i].dstapic == MP_APIC_ALL)
1020 break; 1010 break;
1021 1011
1022 if (!test_bit(lbus, mp_bus_not_pci) && 1012 if (!test_bit(lbus, mp_bus_not_pci) &&
1023 !mp_irqs[i].mp_irqtype && 1013 !mp_irqs[i].irqtype &&
1024 (bus == lbus) && 1014 (bus == lbus) &&
1025 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) { 1015 (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
1026 int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq); 1016 int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);
1027 1017
1028 if (!(apic || IO_APIC_IRQ(irq))) 1018 if (!(apic || IO_APIC_IRQ(irq)))
1029 continue; 1019 continue;
1030 1020
1031 if (pin == (mp_irqs[i].mp_srcbusirq & 3)) 1021 if (pin == (mp_irqs[i].srcbusirq & 3))
1032 return irq; 1022 return irq;
1033 /* 1023 /*
1034 * Use the first all-but-pin matching entry as a 1024 * Use the first all-but-pin matching entry as a
@@ -1071,7 +1061,7 @@ static int EISA_ELCR(unsigned int irq)
1071 * EISA conforming in the MP table, that means its trigger type must 1061 * EISA conforming in the MP table, that means its trigger type must
1072 * be read in from the ELCR */ 1062 * be read in from the ELCR */
1073 1063
1074#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq)) 1064#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq))
1075#define default_EISA_polarity(idx) default_ISA_polarity(idx) 1065#define default_EISA_polarity(idx) default_ISA_polarity(idx)
1076 1066
1077/* PCI interrupts are always polarity one level triggered, 1067/* PCI interrupts are always polarity one level triggered,
@@ -1088,13 +1078,13 @@ static int EISA_ELCR(unsigned int irq)
1088 1078
1089static int MPBIOS_polarity(int idx) 1079static int MPBIOS_polarity(int idx)
1090{ 1080{
1091 int bus = mp_irqs[idx].mp_srcbus; 1081 int bus = mp_irqs[idx].srcbus;
1092 int polarity; 1082 int polarity;
1093 1083
1094 /* 1084 /*
1095 * Determine IRQ line polarity (high active or low active): 1085 * Determine IRQ line polarity (high active or low active):
1096 */ 1086 */
1097 switch (mp_irqs[idx].mp_irqflag & 3) 1087 switch (mp_irqs[idx].irqflag & 3)
1098 { 1088 {
1099 case 0: /* conforms, ie. bus-type dependent polarity */ 1089 case 0: /* conforms, ie. bus-type dependent polarity */
1100 if (test_bit(bus, mp_bus_not_pci)) 1090 if (test_bit(bus, mp_bus_not_pci))
@@ -1130,13 +1120,13 @@ static int MPBIOS_polarity(int idx)
1130 1120
1131static int MPBIOS_trigger(int idx) 1121static int MPBIOS_trigger(int idx)
1132{ 1122{
1133 int bus = mp_irqs[idx].mp_srcbus; 1123 int bus = mp_irqs[idx].srcbus;
1134 int trigger; 1124 int trigger;
1135 1125
1136 /* 1126 /*
1137 * Determine IRQ trigger mode (edge or level sensitive): 1127 * Determine IRQ trigger mode (edge or level sensitive):
1138 */ 1128 */
1139 switch ((mp_irqs[idx].mp_irqflag>>2) & 3) 1129 switch ((mp_irqs[idx].irqflag>>2) & 3)
1140 { 1130 {
1141 case 0: /* conforms, ie. bus-type dependent */ 1131 case 0: /* conforms, ie. bus-type dependent */
1142 if (test_bit(bus, mp_bus_not_pci)) 1132 if (test_bit(bus, mp_bus_not_pci))
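Both MPBIOS_polarity() and MPBIOS_trigger() decode the MP-table irqflag
field: per the Intel MultiProcessor Specification, bits 1:0 carry the
polarity and bits 3:2 the trigger mode, with 0 meaning "conforms to the
bus". A decode sketch (the helper name is illustrative):

static void sketch_decode_irqflag(unsigned short irqflag)
{
        int polarity = irqflag & 3;        /* 0=conforms 1=high 3=low   */
        int trigger  = (irqflag >> 2) & 3; /* 0=conforms 1=edge 3=level */

        pr_debug("polarity=%d trigger=%d\n", polarity, trigger);
}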
@@ -1214,16 +1204,16 @@ int (*ioapic_renumber_irq)(int ioapic, int irq);
1214static int pin_2_irq(int idx, int apic, int pin) 1204static int pin_2_irq(int idx, int apic, int pin)
1215{ 1205{
1216 int irq, i; 1206 int irq, i;
1217 int bus = mp_irqs[idx].mp_srcbus; 1207 int bus = mp_irqs[idx].srcbus;
1218 1208
1219 /* 1209 /*
1220 * Debugging check, we are in big trouble if this message pops up! 1210 * Debugging check, we are in big trouble if this message pops up!
1221 */ 1211 */
1222 if (mp_irqs[idx].mp_dstirq != pin) 1212 if (mp_irqs[idx].dstirq != pin)
1223 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); 1213 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1224 1214
1225 if (test_bit(bus, mp_bus_not_pci)) { 1215 if (test_bit(bus, mp_bus_not_pci)) {
1226 irq = mp_irqs[idx].mp_srcbusirq; 1216 irq = mp_irqs[idx].srcbusirq;
1227 } else { 1217 } else {
1228 /* 1218 /*
1229 * PCI IRQs are mapped in order 1219 * PCI IRQs are mapped in order
@@ -1315,7 +1305,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1315 int new_cpu; 1305 int new_cpu;
1316 int vector, offset; 1306 int vector, offset;
1317 1307
1318 vector_allocation_domain(cpu, tmp_mask); 1308 apic->vector_allocation_domain(cpu, tmp_mask);
1319 1309
1320 vector = current_vector; 1310 vector = current_vector;
1321 offset = current_offset; 1311 offset = current_offset;
@@ -1485,10 +1475,10 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
1485 handle_edge_irq, "edge"); 1475 handle_edge_irq, "edge");
1486} 1476}
1487 1477
1488static int setup_ioapic_entry(int apic, int irq, 1478int setup_ioapic_entry(int apic_id, int irq,
1489 struct IO_APIC_route_entry *entry, 1479 struct IO_APIC_route_entry *entry,
1490 unsigned int destination, int trigger, 1480 unsigned int destination, int trigger,
1491 int polarity, int vector) 1481 int polarity, int vector)
1492{ 1482{
1493 /* 1483 /*
1494 * add it to the IO-APIC irq-routing table: 1484 * add it to the IO-APIC irq-routing table:
@@ -1497,25 +1487,25 @@ static int setup_ioapic_entry(int apic, int irq,
1497 1487
1498#ifdef CONFIG_INTR_REMAP 1488#ifdef CONFIG_INTR_REMAP
1499 if (intr_remapping_enabled) { 1489 if (intr_remapping_enabled) {
1500 struct intel_iommu *iommu = map_ioapic_to_ir(apic); 1490 struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
1501 struct irte irte; 1491 struct irte irte;
1502 struct IR_IO_APIC_route_entry *ir_entry = 1492 struct IR_IO_APIC_route_entry *ir_entry =
1503 (struct IR_IO_APIC_route_entry *) entry; 1493 (struct IR_IO_APIC_route_entry *) entry;
1504 int index; 1494 int index;
1505 1495
1506 if (!iommu) 1496 if (!iommu)
1507 panic("No mapping iommu for ioapic %d\n", apic); 1497 panic("No mapping iommu for ioapic %d\n", apic_id);
1508 1498
1509 index = alloc_irte(iommu, irq, 1); 1499 index = alloc_irte(iommu, irq, 1);
1510 if (index < 0) 1500 if (index < 0)
1511 panic("Failed to allocate IRTE for ioapic %d\n", apic); 1501 panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
1512 1502
1513 memset(&irte, 0, sizeof(irte)); 1503 memset(&irte, 0, sizeof(irte));
1514 1504
1515 irte.present = 1; 1505 irte.present = 1;
1516 irte.dst_mode = INT_DEST_MODE; 1506 irte.dst_mode = apic->irq_dest_mode;
1517 irte.trigger_mode = trigger; 1507 irte.trigger_mode = trigger;
1518 irte.dlvry_mode = INT_DELIVERY_MODE; 1508 irte.dlvry_mode = apic->irq_delivery_mode;
1519 irte.vector = vector; 1509 irte.vector = vector;
1520 irte.dest_id = IRTE_DEST(destination); 1510 irte.dest_id = IRTE_DEST(destination);
1521 1511
@@ -1528,8 +1518,8 @@ static int setup_ioapic_entry(int apic, int irq,
1528 } else 1518 } else
1529#endif 1519#endif
1530 { 1520 {
1531 entry->delivery_mode = INT_DELIVERY_MODE; 1521 entry->delivery_mode = apic->irq_delivery_mode;
1532 entry->dest_mode = INT_DEST_MODE; 1522 entry->dest_mode = apic->irq_dest_mode;
1533 entry->dest = destination; 1523 entry->dest = destination;
1534 } 1524 }
1535 1525
@@ -1546,7 +1536,7 @@ static int setup_ioapic_entry(int apic, int irq,
1546 return 0; 1536 return 0;
1547} 1537}
1548 1538
1549static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc, 1539static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
1550 int trigger, int polarity) 1540 int trigger, int polarity)
1551{ 1541{
1552 struct irq_cfg *cfg; 1542 struct irq_cfg *cfg;
@@ -1558,22 +1548,22 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1558 1548
1559 cfg = desc->chip_data; 1549 cfg = desc->chip_data;
1560 1550
1561 if (assign_irq_vector(irq, cfg, TARGET_CPUS)) 1551 if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1562 return; 1552 return;
1563 1553
1564 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 1554 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
1565 1555
1566 apic_printk(APIC_VERBOSE,KERN_DEBUG 1556 apic_printk(APIC_VERBOSE,KERN_DEBUG
1567 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1557 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1568 "IRQ %d Mode:%i Active:%i)\n", 1558 "IRQ %d Mode:%i Active:%i)\n",
1569 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector, 1559 apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
1570 irq, trigger, polarity); 1560 irq, trigger, polarity);
1571 1561
1572 1562
1573 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, 1563 if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
1574 dest, trigger, polarity, cfg->vector)) { 1564 dest, trigger, polarity, cfg->vector)) {
1575 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1565 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1576 mp_ioapics[apic].mp_apicid, pin); 1566 mp_ioapics[apic_id].apicid, pin);
1577 __clear_irq_vector(irq, cfg); 1567 __clear_irq_vector(irq, cfg);
1578 return; 1568 return;
1579 } 1569 }
@@ -1582,12 +1572,12 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1582 if (irq < NR_IRQS_LEGACY) 1572 if (irq < NR_IRQS_LEGACY)
1583 disable_8259A_irq(irq); 1573 disable_8259A_irq(irq);
1584 1574
1585 ioapic_write_entry(apic, pin, entry); 1575 ioapic_write_entry(apic_id, pin, entry);
1586} 1576}
1587 1577
1588static void __init setup_IO_APIC_irqs(void) 1578static void __init setup_IO_APIC_irqs(void)
1589{ 1579{
1590 int apic, pin, idx, irq; 1580 int apic_id, pin, idx, irq;
1591 int notcon = 0; 1581 int notcon = 0;
1592 struct irq_desc *desc; 1582 struct irq_desc *desc;
1593 struct irq_cfg *cfg; 1583 struct irq_cfg *cfg;
@@ -1595,21 +1585,19 @@ static void __init setup_IO_APIC_irqs(void)
1595 1585
1596 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1586 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1597 1587
1598 for (apic = 0; apic < nr_ioapics; apic++) { 1588 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
1599 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 1589 for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
1600 1590
1601 idx = find_irq_entry(apic, pin, mp_INT); 1591 idx = find_irq_entry(apic_id, pin, mp_INT);
1602 if (idx == -1) { 1592 if (idx == -1) {
1603 if (!notcon) { 1593 if (!notcon) {
1604 notcon = 1; 1594 notcon = 1;
1605 apic_printk(APIC_VERBOSE, 1595 apic_printk(APIC_VERBOSE,
1606 KERN_DEBUG " %d-%d", 1596 KERN_DEBUG " %d-%d",
1607 mp_ioapics[apic].mp_apicid, 1597 mp_ioapics[apic_id].apicid, pin);
1608 pin);
1609 } else 1598 } else
1610 apic_printk(APIC_VERBOSE, " %d-%d", 1599 apic_printk(APIC_VERBOSE, " %d-%d",
1611 mp_ioapics[apic].mp_apicid, 1600 mp_ioapics[apic_id].apicid, pin);
1612 pin);
1613 continue; 1601 continue;
1614 } 1602 }
1615 if (notcon) { 1603 if (notcon) {
@@ -1618,20 +1606,25 @@ static void __init setup_IO_APIC_irqs(void)
1618 notcon = 0; 1606 notcon = 0;
1619 } 1607 }
1620 1608
1621 irq = pin_2_irq(idx, apic, pin); 1609 irq = pin_2_irq(idx, apic_id, pin);
1622#ifdef CONFIG_X86_32 1610
1623 if (multi_timer_check(apic, irq)) 1611 /*
1612 * Skip the timer IRQ if there's a quirk handler
1613 * installed and if it returns 1:
1614 */
1615 if (apic->multi_timer_check &&
1616 apic->multi_timer_check(apic_id, irq))
1624 continue; 1617 continue;
1625#endif 1618
1626 desc = irq_to_desc_alloc_cpu(irq, cpu); 1619 desc = irq_to_desc_alloc_cpu(irq, cpu);
1627 if (!desc) { 1620 if (!desc) {
1628	printk(KERN_INFO "cannot get irq_desc for %d\n", irq);	 1621	printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
1629 continue; 1622 continue;
1630 } 1623 }
1631 cfg = desc->chip_data; 1624 cfg = desc->chip_data;
1632 add_pin_to_irq_cpu(cfg, cpu, apic, pin); 1625 add_pin_to_irq_cpu(cfg, cpu, apic_id, pin);
1633 1626
1634 setup_IO_APIC_irq(apic, pin, irq, desc, 1627 setup_IO_APIC_irq(apic_id, pin, irq, desc,
1635 irq_trigger(idx), irq_polarity(idx)); 1628 irq_trigger(idx), irq_polarity(idx));
1636 } 1629 }
1637 } 1630 }
@@ -1644,7 +1637,7 @@ static void __init setup_IO_APIC_irqs(void)
1644/* 1637/*
1645 * Set up the timer pin, possibly with the 8259A-master behind. 1638 * Set up the timer pin, possibly with the 8259A-master behind.
1646 */ 1639 */
1647static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, 1640static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
1648 int vector) 1641 int vector)
1649{ 1642{
1650 struct IO_APIC_route_entry entry; 1643 struct IO_APIC_route_entry entry;
@@ -1660,10 +1653,10 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1660 * We use logical delivery to get the timer IRQ 1653 * We use logical delivery to get the timer IRQ
1661 * to the first CPU. 1654 * to the first CPU.
1662 */ 1655 */
1663 entry.dest_mode = INT_DEST_MODE; 1656 entry.dest_mode = apic->irq_dest_mode;
1664 entry.mask = 1; /* mask IRQ now */ 1657 entry.mask = 0; /* don't mask IRQ for edge */
1665 entry.dest = cpu_mask_to_apicid(TARGET_CPUS); 1658 entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
1666 entry.delivery_mode = INT_DELIVERY_MODE; 1659 entry.delivery_mode = apic->irq_delivery_mode;
1667 entry.polarity = 0; 1660 entry.polarity = 0;
1668 entry.trigger = 0; 1661 entry.trigger = 0;
1669 entry.vector = vector; 1662 entry.vector = vector;
@@ -1677,7 +1670,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1677 /* 1670 /*
1678 * Add it to the IO-APIC irq-routing table: 1671 * Add it to the IO-APIC irq-routing table:
1679 */ 1672 */
1680 ioapic_write_entry(apic, pin, entry); 1673 ioapic_write_entry(apic_id, pin, entry);
1681} 1674}
1682 1675
1683 1676
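
Note the mask change above: the timer route entry now goes out with mask = 0, because an edge that fires while the pin is masked is simply lost. A standalone sketch of the fields this function programs -- the bitfield is a trimmed illustration, not the exact IO_APIC_route_entry layout:

#include <stdio.h>

struct rte_sketch {                     /* trimmed redirection-table entry */
        unsigned int vector     : 8;
        unsigned int delivery   : 3;
        unsigned int dest_mode  : 1;    /* 1 = logical destination */
        unsigned int polarity   : 1;
        unsigned int trigger    : 1;    /* 0 = edge, 1 = level */
        unsigned int mask       : 1;    /* 0 = interrupt enabled */
};

int main(void)
{
        struct rte_sketch timer = {
                .vector = 0x30, .delivery = 1, .dest_mode = 1,
                .polarity = 0, .trigger = 0, .mask = 0, /* edge, unmasked */
        };

        printf("timer pin: vector 0x%02x, %s-triggered, %s\n",
               timer.vector,
               timer.trigger ? "level" : "edge",
               timer.mask ? "masked" : "unmasked");
        return 0;
}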
@@ -1699,7 +1692,7 @@ __apicdebuginit(void) print_IO_APIC(void)
1699 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); 1692 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1700 for (i = 0; i < nr_ioapics; i++) 1693 for (i = 0; i < nr_ioapics; i++)
1701 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", 1694 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1702 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]); 1695 mp_ioapics[i].apicid, nr_ioapic_registers[i]);
1703 1696
1704 /* 1697 /*
1705 * We are a bit conservative about what we expect. We have to 1698 * We are a bit conservative about what we expect. We have to
@@ -1719,7 +1712,7 @@ __apicdebuginit(void) print_IO_APIC(void)
1719 spin_unlock_irqrestore(&ioapic_lock, flags); 1712 spin_unlock_irqrestore(&ioapic_lock, flags);
1720 1713
1721 printk("\n"); 1714 printk("\n");
1722 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid); 1715 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
1723 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); 1716 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1724 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); 1717 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1725 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); 1718 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
@@ -1980,13 +1973,6 @@ void __init enable_IO_APIC(void)
1980 int apic; 1973 int apic;
1981 unsigned long flags; 1974 unsigned long flags;
1982 1975
1983#ifdef CONFIG_X86_32
1984 int i;
1985 if (!pirqs_enabled)
1986 for (i = 0; i < MAX_PIRQS; i++)
1987 pirq_entries[i] = -1;
1988#endif
1989
1990 /* 1976 /*
1991 * The number of IO-APIC IRQ registers (== #pins): 1977 * The number of IO-APIC IRQ registers (== #pins):
1992 */ 1978 */
@@ -2090,7 +2076,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
2090{ 2076{
2091 union IO_APIC_reg_00 reg_00; 2077 union IO_APIC_reg_00 reg_00;
2092 physid_mask_t phys_id_present_map; 2078 physid_mask_t phys_id_present_map;
2093 int apic; 2079 int apic_id;
2094 int i; 2080 int i;
2095 unsigned char old_id; 2081 unsigned char old_id;
2096 unsigned long flags; 2082 unsigned long flags;
@@ -2109,26 +2095,26 @@ static void __init setup_ioapic_ids_from_mpc(void)
2109 * This is broken; anything with a real cpu count has to 2095 * This is broken; anything with a real cpu count has to
2110 * circumvent this idiocy regardless. 2096 * circumvent this idiocy regardless.
2111 */ 2097 */
2112 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map); 2098 phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
2113 2099
2114 /* 2100 /*
2115 * Set the IOAPIC ID to the value stored in the MPC table. 2101 * Set the IOAPIC ID to the value stored in the MPC table.
2116 */ 2102 */
2117 for (apic = 0; apic < nr_ioapics; apic++) { 2103 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
2118 2104
2119 /* Read the register 0 value */ 2105 /* Read the register 0 value */
2120 spin_lock_irqsave(&ioapic_lock, flags); 2106 spin_lock_irqsave(&ioapic_lock, flags);
2121 reg_00.raw = io_apic_read(apic, 0); 2107 reg_00.raw = io_apic_read(apic_id, 0);
2122 spin_unlock_irqrestore(&ioapic_lock, flags); 2108 spin_unlock_irqrestore(&ioapic_lock, flags);
2123 2109
2124 old_id = mp_ioapics[apic].mp_apicid; 2110 old_id = mp_ioapics[apic_id].apicid;
2125 2111
2126 if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) { 2112 if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
2127 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", 2113 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
2128 apic, mp_ioapics[apic].mp_apicid); 2114 apic_id, mp_ioapics[apic_id].apicid);
2129 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 2115 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2130 reg_00.bits.ID); 2116 reg_00.bits.ID);
2131 mp_ioapics[apic].mp_apicid = reg_00.bits.ID; 2117 mp_ioapics[apic_id].apicid = reg_00.bits.ID;
2132 } 2118 }
2133 2119
2134 /* 2120 /*
@@ -2136,10 +2122,10 @@ static void __init setup_ioapic_ids_from_mpc(void)
2136 * system must have a unique ID or we get lots of nice 2122 * system must have a unique ID or we get lots of nice
2137 * 'stuck on smp_invalidate_needed IPI wait' messages. 2123 * 'stuck on smp_invalidate_needed IPI wait' messages.
2138 */ 2124 */
2139 if (check_apicid_used(phys_id_present_map, 2125 if (apic->check_apicid_used(phys_id_present_map,
2140 mp_ioapics[apic].mp_apicid)) { 2126 mp_ioapics[apic_id].apicid)) {
2141 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", 2127 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
2142 apic, mp_ioapics[apic].mp_apicid); 2128 apic_id, mp_ioapics[apic_id].apicid);
2143 for (i = 0; i < get_physical_broadcast(); i++) 2129 for (i = 0; i < get_physical_broadcast(); i++)
2144 if (!physid_isset(i, phys_id_present_map)) 2130 if (!physid_isset(i, phys_id_present_map))
2145 break; 2131 break;
@@ -2148,13 +2134,13 @@ static void __init setup_ioapic_ids_from_mpc(void)
2148 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 2134 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2149 i); 2135 i);
2150 physid_set(i, phys_id_present_map); 2136 physid_set(i, phys_id_present_map);
2151 mp_ioapics[apic].mp_apicid = i; 2137 mp_ioapics[apic_id].apicid = i;
2152 } else { 2138 } else {
2153 physid_mask_t tmp; 2139 physid_mask_t tmp;
2154 tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid); 2140 tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid);
2155 apic_printk(APIC_VERBOSE, "Setting %d in the " 2141 apic_printk(APIC_VERBOSE, "Setting %d in the "
2156 "phys_id_present_map\n", 2142 "phys_id_present_map\n",
2157 mp_ioapics[apic].mp_apicid); 2143 mp_ioapics[apic_id].apicid);
2158 physids_or(phys_id_present_map, phys_id_present_map, tmp); 2144 physids_or(phys_id_present_map, phys_id_present_map, tmp);
2159 } 2145 }
2160 2146
@@ -2163,11 +2149,11 @@ static void __init setup_ioapic_ids_from_mpc(void)
2163 * We need to adjust the IRQ routing table 2149 * We need to adjust the IRQ routing table
2164 * if the ID changed. 2150 * if the ID changed.
2165 */ 2151 */
2166 if (old_id != mp_ioapics[apic].mp_apicid) 2152 if (old_id != mp_ioapics[apic_id].apicid)
2167 for (i = 0; i < mp_irq_entries; i++) 2153 for (i = 0; i < mp_irq_entries; i++)
2168 if (mp_irqs[i].mp_dstapic == old_id) 2154 if (mp_irqs[i].dstapic == old_id)
2169 mp_irqs[i].mp_dstapic 2155 mp_irqs[i].dstapic
2170 = mp_ioapics[apic].mp_apicid; 2156 = mp_ioapics[apic_id].apicid;
2171 2157
2172 /* 2158 /*
2173 * Read the right value from the MPC table and 2159 * Read the right value from the MPC table and
@@ -2175,20 +2161,20 @@ static void __init setup_ioapic_ids_from_mpc(void)
2175 */ 2161 */
2176 apic_printk(APIC_VERBOSE, KERN_INFO 2162 apic_printk(APIC_VERBOSE, KERN_INFO
2177 "...changing IO-APIC physical APIC ID to %d ...", 2163 "...changing IO-APIC physical APIC ID to %d ...",
2178 mp_ioapics[apic].mp_apicid); 2164 mp_ioapics[apic_id].apicid);
2179 2165
2180 reg_00.bits.ID = mp_ioapics[apic].mp_apicid; 2166 reg_00.bits.ID = mp_ioapics[apic_id].apicid;
2181 spin_lock_irqsave(&ioapic_lock, flags); 2167 spin_lock_irqsave(&ioapic_lock, flags);
2182 io_apic_write(apic, 0, reg_00.raw); 2168 io_apic_write(apic_id, 0, reg_00.raw);
2183 spin_unlock_irqrestore(&ioapic_lock, flags); 2169 spin_unlock_irqrestore(&ioapic_lock, flags);
2184 2170
2185 /* 2171 /*
2186 * Sanity check 2172 * Sanity check
2187 */ 2173 */
2188 spin_lock_irqsave(&ioapic_lock, flags); 2174 spin_lock_irqsave(&ioapic_lock, flags);
2189 reg_00.raw = io_apic_read(apic, 0); 2175 reg_00.raw = io_apic_read(apic_id, 0);
2190 spin_unlock_irqrestore(&ioapic_lock, flags); 2176 spin_unlock_irqrestore(&ioapic_lock, flags);
2191 if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid) 2177 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
2192 printk("could not set ID!\n"); 2178 printk("could not set ID!\n");
2193 else 2179 else
2194 apic_printk(APIC_VERBOSE, " ok.\n"); 2180 apic_printk(APIC_VERBOSE, " ok.\n");
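
The clashing-ID fixup in this function reduces to a first-free-bit scan over the physical-ID present map. A minimal model of that scan, with the map shrunk to one word and get_physical_broadcast() assumed to return 15 (4-bit APIC IDs):

#include <stdio.h>

int main(void)
{
        unsigned int present = 0x0b;    /* IDs 0, 1 and 3 already claimed */
        int broadcast = 15;             /* assumed get_physical_broadcast() */
        int i;

        for (i = 0; i < broadcast; i++)
                if (!(present & (1u << i)))
                        break;          /* first unused physical ID: 2 */

        present |= 1u << i;             /* i.e. physid_set(i, ...) */
        printf("fixing up clashing IO-APIC ID to %d\n", i);
        return 0;
}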
@@ -2291,7 +2277,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
2291 unsigned long flags; 2277 unsigned long flags;
2292 2278
2293 spin_lock_irqsave(&vector_lock, flags); 2279 spin_lock_irqsave(&vector_lock, flags);
2294 send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2280 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2295 spin_unlock_irqrestore(&vector_lock, flags); 2281 spin_unlock_irqrestore(&vector_lock, flags);
2296 2282
2297 return 1; 2283 return 1;
@@ -2299,7 +2285,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
2299#else 2285#else
2300static int ioapic_retrigger_irq(unsigned int irq) 2286static int ioapic_retrigger_irq(unsigned int irq)
2301{ 2287{
2302 send_IPI_self(irq_cfg(irq)->vector); 2288 apic->send_IPI_self(irq_cfg(irq)->vector);
2303 2289
2304 return 1; 2290 return 1;
2305} 2291}
@@ -2363,7 +2349,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2363 2349
2364 set_extra_move_desc(desc, mask); 2350 set_extra_move_desc(desc, mask);
2365 2351
2366 dest = cpu_mask_to_apicid_and(cfg->domain, mask); 2352 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
2367 2353
2368 modify_ioapic_rte = desc->status & IRQ_LEVEL; 2354 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2369 if (modify_ioapic_rte) { 2355 if (modify_ioapic_rte) {
@@ -2383,7 +2369,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2383 if (cfg->move_in_progress) 2369 if (cfg->move_in_progress)
2384 send_cleanup_vector(cfg); 2370 send_cleanup_vector(cfg);
2385 2371
2386 cpumask_copy(&desc->affinity, mask); 2372 cpumask_copy(desc->affinity, mask);
2387} 2373}
2388 2374
2389static int migrate_irq_remapped_level_desc(struct irq_desc *desc) 2375static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2405,11 +2391,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
2405 } 2391 }
2406 2392
2407	/* everything is clear. we have right of way */	 2393	/* everything is clear. we have right of way */
2408 migrate_ioapic_irq_desc(desc, &desc->pending_mask); 2394 migrate_ioapic_irq_desc(desc, desc->pending_mask);
2409 2395
2410 ret = 0; 2396 ret = 0;
2411 desc->status &= ~IRQ_MOVE_PENDING; 2397 desc->status &= ~IRQ_MOVE_PENDING;
2412 cpumask_clear(&desc->pending_mask); 2398 cpumask_clear(desc->pending_mask);
2413 2399
2414unmask: 2400unmask:
2415 unmask_IO_APIC_irq_desc(desc); 2401 unmask_IO_APIC_irq_desc(desc);
@@ -2434,7 +2420,7 @@ static void ir_irq_migration(struct work_struct *work)
2434 continue; 2420 continue;
2435 } 2421 }
2436 2422
2437 desc->chip->set_affinity(irq, &desc->pending_mask); 2423 desc->chip->set_affinity(irq, desc->pending_mask);
2438 spin_unlock_irqrestore(&desc->lock, flags); 2424 spin_unlock_irqrestore(&desc->lock, flags);
2439 } 2425 }
2440 } 2426 }
@@ -2448,7 +2434,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2448{ 2434{
2449 if (desc->status & IRQ_LEVEL) { 2435 if (desc->status & IRQ_LEVEL) {
2450 desc->status |= IRQ_MOVE_PENDING; 2436 desc->status |= IRQ_MOVE_PENDING;
2451 cpumask_copy(&desc->pending_mask, mask); 2437 cpumask_copy(desc->pending_mask, mask);
2452 migrate_irq_remapped_level_desc(desc); 2438 migrate_irq_remapped_level_desc(desc);
2453 return; 2439 return;
2454 } 2440 }
@@ -2516,7 +2502,7 @@ static void irq_complete_move(struct irq_desc **descp)
2516 2502
2517 /* domain has not changed, but affinity did */ 2503 /* domain has not changed, but affinity did */
2518 me = smp_processor_id(); 2504 me = smp_processor_id();
2519 if (cpu_isset(me, desc->affinity)) { 2505 if (cpumask_test_cpu(me, desc->affinity)) {
2520 *descp = desc = move_irq_desc(desc, me); 2506 *descp = desc = move_irq_desc(desc, me);
2521 /* get the new one */ 2507 /* get the new one */
2522 cfg = desc->chip_data; 2508 cfg = desc->chip_data;
@@ -2867,19 +2853,15 @@ static inline void __init check_timer(void)
2867 int cpu = boot_cpu_id; 2853 int cpu = boot_cpu_id;
2868 int apic1, pin1, apic2, pin2; 2854 int apic1, pin1, apic2, pin2;
2869 unsigned long flags; 2855 unsigned long flags;
2870 unsigned int ver;
2871 int no_pin1 = 0; 2856 int no_pin1 = 0;
2872 2857
2873 local_irq_save(flags); 2858 local_irq_save(flags);
2874 2859
2875 ver = apic_read(APIC_LVR);
2876 ver = GET_APIC_VERSION(ver);
2877
2878 /* 2860 /*
2879 * get/set the timer IRQ vector: 2861 * get/set the timer IRQ vector:
2880 */ 2862 */
2881 disable_8259A_irq(0); 2863 disable_8259A_irq(0);
2882 assign_irq_vector(0, cfg, TARGET_CPUS); 2864 assign_irq_vector(0, cfg, apic->target_cpus());
2883 2865
2884 /* 2866 /*
2885 * As IRQ0 is to be enabled in the 8259A, the virtual 2867 * As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2893,7 +2875,13 @@ static inline void __init check_timer(void)
2893 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2875 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2894 init_8259A(1); 2876 init_8259A(1);
2895#ifdef CONFIG_X86_32 2877#ifdef CONFIG_X86_32
2896 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); 2878 {
2879 unsigned int ver;
2880
2881 ver = apic_read(APIC_LVR);
2882 ver = GET_APIC_VERSION(ver);
2883 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2884 }
2897#endif 2885#endif
2898 2886
2899 pin1 = find_isa_irq_pin(0, mp_INT); 2887 pin1 = find_isa_irq_pin(0, mp_INT);
@@ -2932,8 +2920,17 @@ static inline void __init check_timer(void)
2932 if (no_pin1) { 2920 if (no_pin1) {
2933 add_pin_to_irq_cpu(cfg, cpu, apic1, pin1); 2921 add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
2934 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2922 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2923 } else {
 2924		/* For an edge-triggered timer, setup_IO_APIC_irq()
 2925		 * already leaves the pin unmasked, so we only need
 2926		 * to unmask it here if it is level-triggered.
 2927		 * Do we really have a level-triggered timer?
 2928		 */
2929 int idx;
2930 idx = find_irq_entry(apic1, pin1, mp_INT);
2931 if (idx != -1 && irq_trigger(idx))
2932 unmask_IO_APIC_irq_desc(desc);
2935 } 2933 }
2936 unmask_IO_APIC_irq_desc(desc);
2937 if (timer_irq_works()) { 2934 if (timer_irq_works()) {
2938 if (nmi_watchdog == NMI_IO_APIC) { 2935 if (nmi_watchdog == NMI_IO_APIC) {
2939 setup_nmi(); 2936 setup_nmi();
@@ -2947,6 +2944,7 @@ static inline void __init check_timer(void)
2947 if (intr_remapping_enabled) 2944 if (intr_remapping_enabled)
2948 panic("timer doesn't work through Interrupt-remapped IO-APIC"); 2945 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2949#endif 2946#endif
2947 local_irq_disable();
2950 clear_IO_APIC_pin(apic1, pin1); 2948 clear_IO_APIC_pin(apic1, pin1);
2951 if (!no_pin1) 2949 if (!no_pin1)
2952 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " 2950 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
@@ -2961,7 +2959,6 @@ static inline void __init check_timer(void)
2961 */ 2959 */
2962 replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2); 2960 replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
2963 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2961 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2964 unmask_IO_APIC_irq_desc(desc);
2965 enable_8259A_irq(0); 2962 enable_8259A_irq(0);
2966 if (timer_irq_works()) { 2963 if (timer_irq_works()) {
2967 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2964 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2976,6 +2973,7 @@ static inline void __init check_timer(void)
2976 /* 2973 /*
2977 * Cleanup, just in case ... 2974 * Cleanup, just in case ...
2978 */ 2975 */
2976 local_irq_disable();
2979 disable_8259A_irq(0); 2977 disable_8259A_irq(0);
2980 clear_IO_APIC_pin(apic2, pin2); 2978 clear_IO_APIC_pin(apic2, pin2);
2981 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 2979 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
@@ -3001,6 +2999,7 @@ static inline void __init check_timer(void)
3001 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2999 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3002 goto out; 3000 goto out;
3003 } 3001 }
3002 local_irq_disable();
3004 disable_8259A_irq(0); 3003 disable_8259A_irq(0);
3005 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 3004 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
3006 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 3005 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
@@ -3018,6 +3017,7 @@ static inline void __init check_timer(void)
3018 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 3017 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3019 goto out; 3018 goto out;
3020 } 3019 }
3020 local_irq_disable();
3021 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 3021 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
3022 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 3022 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
3023 "report. Then try booting with the 'noapic' option.\n"); 3023 "report. Then try booting with the 'noapic' option.\n");
@@ -3047,13 +3047,9 @@ out:
3047void __init setup_IO_APIC(void) 3047void __init setup_IO_APIC(void)
3048{ 3048{
3049 3049
3050#ifdef CONFIG_X86_32
3051 enable_IO_APIC();
3052#else
3053 /* 3050 /*
3054 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 3051 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
3055 */ 3052 */
3056#endif
3057 3053
3058 io_apic_irqs = ~PIC_IRQS; 3054 io_apic_irqs = ~PIC_IRQS;
3059 3055
@@ -3118,8 +3114,8 @@ static int ioapic_resume(struct sys_device *dev)
3118 3114
3119 spin_lock_irqsave(&ioapic_lock, flags); 3115 spin_lock_irqsave(&ioapic_lock, flags);
3120 reg_00.raw = io_apic_read(dev->id, 0); 3116 reg_00.raw = io_apic_read(dev->id, 0);
3121 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) { 3117 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
3122 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid; 3118 reg_00.bits.ID = mp_ioapics[dev->id].apicid;
3123 io_apic_write(dev->id, 0, reg_00.raw); 3119 io_apic_write(dev->id, 0, reg_00.raw);
3124 } 3120 }
3125 spin_unlock_irqrestore(&ioapic_lock, flags); 3121 spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -3169,6 +3165,7 @@ static int __init ioapic_init_sysfs(void)
3169 3165
3170device_initcall(ioapic_init_sysfs); 3166device_initcall(ioapic_init_sysfs);
3171 3167
3168static int nr_irqs_gsi = NR_IRQS_LEGACY;
3172/* 3169/*
3173 * Dynamic irq allocate and deallocation 3170 * Dynamic irq allocate and deallocation
3174 */ 3171 */
@@ -3183,11 +3180,11 @@ unsigned int create_irq_nr(unsigned int irq_want)
3183 struct irq_desc *desc_new = NULL; 3180 struct irq_desc *desc_new = NULL;
3184 3181
3185 irq = 0; 3182 irq = 0;
3186 spin_lock_irqsave(&vector_lock, flags); 3183 if (irq_want < nr_irqs_gsi)
3187 for (new = irq_want; new < NR_IRQS; new++) { 3184 irq_want = nr_irqs_gsi;
3188 if (platform_legacy_irq(new))
3189 continue;
3190 3185
3186 spin_lock_irqsave(&vector_lock, flags);
3187 for (new = irq_want; new < nr_irqs; new++) {
3191 desc_new = irq_to_desc_alloc_cpu(new, cpu); 3188 desc_new = irq_to_desc_alloc_cpu(new, cpu);
3192 if (!desc_new) { 3189 if (!desc_new) {
3193	printk(KERN_INFO "cannot get irq_desc for %d\n", new);	 3190	printk(KERN_INFO "cannot get irq_desc for %d\n", new);
@@ -3197,7 +3194,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
3197 3194
3198 if (cfg_new->vector != 0) 3195 if (cfg_new->vector != 0)
3199 continue; 3196 continue;
3200 if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0) 3197 if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
3201 irq = new; 3198 irq = new;
3202 break; 3199 break;
3203 } 3200 }
@@ -3212,7 +3209,6 @@ unsigned int create_irq_nr(unsigned int irq_want)
3212 return irq; 3209 return irq;
3213} 3210}
3214 3211
3215static int nr_irqs_gsi = NR_IRQS_LEGACY;
3216int create_irq(void) 3212int create_irq(void)
3217{ 3213{
3218 unsigned int irq_want; 3214 unsigned int irq_want;
@@ -3259,12 +3255,15 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3259 int err; 3255 int err;
3260 unsigned dest; 3256 unsigned dest;
3261 3257
3258 if (disable_apic)
3259 return -ENXIO;
3260
3262 cfg = irq_cfg(irq); 3261 cfg = irq_cfg(irq);
3263 err = assign_irq_vector(irq, cfg, TARGET_CPUS); 3262 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3264 if (err) 3263 if (err)
3265 return err; 3264 return err;
3266 3265
3267 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 3266 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
3268 3267
3269#ifdef CONFIG_INTR_REMAP 3268#ifdef CONFIG_INTR_REMAP
3270 if (irq_remapped(irq)) { 3269 if (irq_remapped(irq)) {
@@ -3278,9 +3277,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3278	memset(&irte, 0, sizeof(irte));	 3277	memset(&irte, 0, sizeof(irte));
3279 3278
3280 irte.present = 1; 3279 irte.present = 1;
3281 irte.dst_mode = INT_DEST_MODE; 3280 irte.dst_mode = apic->irq_dest_mode;
3282 irte.trigger_mode = 0; /* edge */ 3281 irte.trigger_mode = 0; /* edge */
3283 irte.dlvry_mode = INT_DELIVERY_MODE; 3282 irte.dlvry_mode = apic->irq_delivery_mode;
3284 irte.vector = cfg->vector; 3283 irte.vector = cfg->vector;
3285 irte.dest_id = IRTE_DEST(dest); 3284 irte.dest_id = IRTE_DEST(dest);
3286 3285
@@ -3298,10 +3297,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3298 msg->address_hi = MSI_ADDR_BASE_HI; 3297 msg->address_hi = MSI_ADDR_BASE_HI;
3299 msg->address_lo = 3298 msg->address_lo =
3300 MSI_ADDR_BASE_LO | 3299 MSI_ADDR_BASE_LO |
3301 ((INT_DEST_MODE == 0) ? 3300 ((apic->irq_dest_mode == 0) ?
3302 MSI_ADDR_DEST_MODE_PHYSICAL: 3301 MSI_ADDR_DEST_MODE_PHYSICAL:
3303 MSI_ADDR_DEST_MODE_LOGICAL) | 3302 MSI_ADDR_DEST_MODE_LOGICAL) |
3304 ((INT_DELIVERY_MODE != dest_LowestPrio) ? 3303 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3305 MSI_ADDR_REDIRECTION_CPU: 3304 MSI_ADDR_REDIRECTION_CPU:
3306 MSI_ADDR_REDIRECTION_LOWPRI) | 3305 MSI_ADDR_REDIRECTION_LOWPRI) |
3307 MSI_ADDR_DEST_ID(dest); 3306 MSI_ADDR_DEST_ID(dest);
@@ -3309,7 +3308,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3309 msg->data = 3308 msg->data =
3310 MSI_DATA_TRIGGER_EDGE | 3309 MSI_DATA_TRIGGER_EDGE |
3311 MSI_DATA_LEVEL_ASSERT | 3310 MSI_DATA_LEVEL_ASSERT |
3312 ((INT_DELIVERY_MODE != dest_LowestPrio) ? 3311 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3313 MSI_DATA_DELIVERY_FIXED: 3312 MSI_DATA_DELIVERY_FIXED:
3314 MSI_DATA_DELIVERY_LOWPRI) | 3313 MSI_DATA_DELIVERY_LOWPRI) |
3315 MSI_DATA_VECTOR(cfg->vector); 3314 MSI_DATA_VECTOR(cfg->vector);
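
Worked numbers for the message composed above, using the architectural x86 MSI layout (base address 0xfee00000, destination ID in address bits 12-19, vector in data bits 0-7). The SKETCH_* macros are local stand-ins for the MSI_* constants used in the hunk, with values filled in for illustration:

#include <stdio.h>
#include <stdint.h>

#define SKETCH_ADDR_BASE        0xfee00000u     /* MSI_ADDR_BASE_LO */
#define SKETCH_ADDR_LOGICAL     (1u << 2)       /* logical dest mode */
#define SKETCH_ADDR_DEST(id)    (((uint32_t)(id) & 0xff) << 12)
#define SKETCH_DATA_EDGE        0u              /* trigger bit 15 clear */
#define SKETCH_DATA_ASSERT      (1u << 14)      /* MSI_DATA_LEVEL_ASSERT */
#define SKETCH_DATA_VECTOR(v)   ((uint32_t)(v) & 0xff)

int main(void)
{
        uint32_t dest = 0x03, vector = 0x31;    /* sample values */
        uint32_t address_lo, data;

        /* the same composition msi_compose_msg() performs: */
        address_lo = SKETCH_ADDR_BASE | SKETCH_ADDR_LOGICAL |
                     SKETCH_ADDR_DEST(dest);
        data = SKETCH_DATA_EDGE | SKETCH_DATA_ASSERT |
               SKETCH_DATA_VECTOR(vector);

        /* prints: address_lo=0xfee03004 data=0x00004031 */
        printf("address_lo=0x%08x data=0x%08x\n", address_lo, data);
        return 0;
}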
@@ -3464,40 +3463,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3464 return 0; 3463 return 0;
3465} 3464}
3466 3465
3467int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
3468{
3469 unsigned int irq;
3470 int ret;
3471 unsigned int irq_want;
3472
3473 irq_want = nr_irqs_gsi;
3474 irq = create_irq_nr(irq_want);
3475 if (irq == 0)
3476 return -1;
3477
3478#ifdef CONFIG_INTR_REMAP
3479 if (!intr_remapping_enabled)
3480 goto no_ir;
3481
3482 ret = msi_alloc_irte(dev, irq, 1);
3483 if (ret < 0)
3484 goto error;
3485no_ir:
3486#endif
3487 ret = setup_msi_irq(dev, msidesc, irq);
3488 if (ret < 0) {
3489 destroy_irq(irq);
3490 return ret;
3491 }
3492 return 0;
3493
3494#ifdef CONFIG_INTR_REMAP
3495error:
3496 destroy_irq(irq);
3497 return ret;
3498#endif
3499}
3500
3501int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3466int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3502{ 3467{
3503 unsigned int irq; 3468 unsigned int irq;
@@ -3514,9 +3479,9 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3514 sub_handle = 0; 3479 sub_handle = 0;
3515 list_for_each_entry(msidesc, &dev->msi_list, list) { 3480 list_for_each_entry(msidesc, &dev->msi_list, list) {
3516 irq = create_irq_nr(irq_want); 3481 irq = create_irq_nr(irq_want);
3517 irq_want++;
3518 if (irq == 0) 3482 if (irq == 0)
3519 return -1; 3483 return -1;
3484 irq_want = irq + 1;
3520#ifdef CONFIG_INTR_REMAP 3485#ifdef CONFIG_INTR_REMAP
3521 if (!intr_remapping_enabled) 3486 if (!intr_remapping_enabled)
3522 goto no_ir; 3487 goto no_ir;
@@ -3727,13 +3692,17 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3727 struct irq_cfg *cfg; 3692 struct irq_cfg *cfg;
3728 int err; 3693 int err;
3729 3694
3695 if (disable_apic)
3696 return -ENXIO;
3697
3730 cfg = irq_cfg(irq); 3698 cfg = irq_cfg(irq);
3731 err = assign_irq_vector(irq, cfg, TARGET_CPUS); 3699 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3732 if (!err) { 3700 if (!err) {
3733 struct ht_irq_msg msg; 3701 struct ht_irq_msg msg;
3734 unsigned dest; 3702 unsigned dest;
3735 3703
3736 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 3704 dest = apic->cpu_mask_to_apicid_and(cfg->domain,
3705 apic->target_cpus());
3737 3706
3738 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3707 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3739 3708
@@ -3741,11 +3710,11 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3741 HT_IRQ_LOW_BASE | 3710 HT_IRQ_LOW_BASE |
3742 HT_IRQ_LOW_DEST_ID(dest) | 3711 HT_IRQ_LOW_DEST_ID(dest) |
3743 HT_IRQ_LOW_VECTOR(cfg->vector) | 3712 HT_IRQ_LOW_VECTOR(cfg->vector) |
3744 ((INT_DEST_MODE == 0) ? 3713 ((apic->irq_dest_mode == 0) ?
3745 HT_IRQ_LOW_DM_PHYSICAL : 3714 HT_IRQ_LOW_DM_PHYSICAL :
3746 HT_IRQ_LOW_DM_LOGICAL) | 3715 HT_IRQ_LOW_DM_LOGICAL) |
3747 HT_IRQ_LOW_RQEOI_EDGE | 3716 HT_IRQ_LOW_RQEOI_EDGE |
3748 ((INT_DELIVERY_MODE != dest_LowestPrio) ? 3717 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3749 HT_IRQ_LOW_MT_FIXED : 3718 HT_IRQ_LOW_MT_FIXED :
3750 HT_IRQ_LOW_MT_ARBITRATED) | 3719 HT_IRQ_LOW_MT_ARBITRATED) |
3751 HT_IRQ_LOW_IRQ_MASKED; 3720 HT_IRQ_LOW_IRQ_MASKED;
@@ -3761,7 +3730,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3761} 3730}
3762#endif /* CONFIG_HT_IRQ */ 3731#endif /* CONFIG_HT_IRQ */
3763 3732
3764#ifdef CONFIG_X86_64 3733#ifdef CONFIG_X86_UV
3765/* 3734/*
3766 * Re-target the irq to the specified CPU and enable the specified MMR located 3735 * Re-target the irq to the specified CPU and enable the specified MMR located
3767 * on the specified blade to allow the sending of MSIs to the specified CPU. 3736 * on the specified blade to allow the sending of MSIs to the specified CPU.
@@ -3793,12 +3762,12 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3793 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); 3762 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3794 3763
3795 entry->vector = cfg->vector; 3764 entry->vector = cfg->vector;
3796 entry->delivery_mode = INT_DELIVERY_MODE; 3765 entry->delivery_mode = apic->irq_delivery_mode;
3797 entry->dest_mode = INT_DEST_MODE; 3766 entry->dest_mode = apic->irq_dest_mode;
3798 entry->polarity = 0; 3767 entry->polarity = 0;
3799 entry->trigger = 0; 3768 entry->trigger = 0;
3800 entry->mask = 0; 3769 entry->mask = 0;
3801 entry->dest = cpu_mask_to_apicid(eligible_cpu); 3770 entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
3802 3771
3803 mmr_pnode = uv_blade_to_pnode(mmr_blade); 3772 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3804 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); 3773 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -3861,6 +3830,28 @@ void __init probe_nr_irqs_gsi(void)
3861 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3830 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3862} 3831}
3863 3832
3833#ifdef CONFIG_SPARSE_IRQ
3834int __init arch_probe_nr_irqs(void)
3835{
3836 int nr;
3837
3838 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3839 nr_irqs = NR_VECTORS * nr_cpu_ids;
3840
3841 nr = nr_irqs_gsi + 8 * nr_cpu_ids;
3842#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3843 /*
3844 * for MSI and HT dyn irq
3845 */
3846 nr += nr_irqs_gsi * 16;
3847#endif
3848 if (nr < nr_irqs)
3849 nr_irqs = nr;
3850
3851 return 0;
3852}
3853#endif
3854
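Concrete numbers for the sizing heuristic above, on a hypothetical box with a single 24-pin IO-APIC and 8 possible CPUs:

#include <stdio.h>

int main(void)
{
        int nr_irqs_gsi = 24;           /* e.g. one 24-pin IO-APIC */
        int nr_cpu_ids  = 8;
        int nr;

        nr = nr_irqs_gsi + 8 * nr_cpu_ids;      /* 24 + 64       =  88 */
        nr += nr_irqs_gsi * 16;                 /* MSI/HT: + 384 = 472 */

        printf("nr_irqs would be capped at %d\n", nr);
        return 0;
}
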
3864/* -------------------------------------------------------------------------- 3855/* --------------------------------------------------------------------------
3865 ACPI-based IOAPIC Configuration 3856 ACPI-based IOAPIC Configuration
3866 -------------------------------------------------------------------------- */ 3857 -------------------------------------------------------------------------- */
@@ -3886,7 +3877,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3886 */ 3877 */
3887 3878
3888 if (physids_empty(apic_id_map)) 3879 if (physids_empty(apic_id_map))
3889 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map); 3880 apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
3890 3881
3891 spin_lock_irqsave(&ioapic_lock, flags); 3882 spin_lock_irqsave(&ioapic_lock, flags);
3892 reg_00.raw = io_apic_read(ioapic, 0); 3883 reg_00.raw = io_apic_read(ioapic, 0);
@@ -3902,10 +3893,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3902 * Every APIC in a system must have a unique ID or we get lots of nice 3893 * Every APIC in a system must have a unique ID or we get lots of nice
3903 * 'stuck on smp_invalidate_needed IPI wait' messages. 3894 * 'stuck on smp_invalidate_needed IPI wait' messages.
3904 */ 3895 */
3905 if (check_apicid_used(apic_id_map, apic_id)) { 3896 if (apic->check_apicid_used(apic_id_map, apic_id)) {
3906 3897
3907 for (i = 0; i < get_physical_broadcast(); i++) { 3898 for (i = 0; i < get_physical_broadcast(); i++) {
3908 if (!check_apicid_used(apic_id_map, i)) 3899 if (!apic->check_apicid_used(apic_id_map, i))
3909 break; 3900 break;
3910 } 3901 }
3911 3902
@@ -3918,7 +3909,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3918 apic_id = i; 3909 apic_id = i;
3919 } 3910 }
3920 3911
3921 tmp = apicid_to_cpu_present(apic_id); 3912 tmp = apic->apicid_to_cpu_present(apic_id);
3922 physids_or(apic_id_map, apic_id_map, tmp); 3913 physids_or(apic_id_map, apic_id_map, tmp);
3923 3914
3924 if (reg_00.bits.ID != apic_id) { 3915 if (reg_00.bits.ID != apic_id) {
@@ -3995,8 +3986,8 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3995 return -1; 3986 return -1;
3996 3987
3997 for (i = 0; i < mp_irq_entries; i++) 3988 for (i = 0; i < mp_irq_entries; i++)
3998 if (mp_irqs[i].mp_irqtype == mp_INT && 3989 if (mp_irqs[i].irqtype == mp_INT &&
3999 mp_irqs[i].mp_srcbusirq == bus_irq) 3990 mp_irqs[i].srcbusirq == bus_irq)
4000 break; 3991 break;
4001 if (i >= mp_irq_entries) 3992 if (i >= mp_irq_entries)
4002 return -1; 3993 return -1;
@@ -4011,7 +4002,7 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
4011/* 4002/*
4012 * This function currently is only a helper for the i386 smp boot process where 4003 * This function currently is only a helper for the i386 smp boot process where
4013 * we need to reprogram the ioredtbls to cater for the cpus which have come online 4004 * we need to reprogram the ioredtbls to cater for the cpus which have come online
4014 * so mask in all cases should simply be TARGET_CPUS 4005 * so mask in all cases should simply be apic->target_cpus()
4015 */ 4006 */
4016#ifdef CONFIG_SMP 4007#ifdef CONFIG_SMP
4017void __init setup_ioapic_dest(void) 4008void __init setup_ioapic_dest(void)
@@ -4050,9 +4041,9 @@ void __init setup_ioapic_dest(void)
4050 */ 4041 */
4051 if (desc->status & 4042 if (desc->status &
4052 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 4043 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
4053 mask = &desc->affinity; 4044 mask = desc->affinity;
4054 else 4045 else
4055 mask = TARGET_CPUS; 4046 mask = apic->target_cpus();
4056 4047
4057#ifdef CONFIG_INTR_REMAP 4048#ifdef CONFIG_INTR_REMAP
4058 if (intr_remapping_enabled) 4049 if (intr_remapping_enabled)
@@ -4111,7 +4102,7 @@ void __init ioapic_init_mappings(void)
4111 ioapic_res = ioapic_setup_resources(); 4102 ioapic_res = ioapic_setup_resources();
4112 for (i = 0; i < nr_ioapics; i++) { 4103 for (i = 0; i < nr_ioapics; i++) {
4113 if (smp_found_config) { 4104 if (smp_found_config) {
4114 ioapic_phys = mp_ioapics[i].mp_apicaddr; 4105 ioapic_phys = mp_ioapics[i].apicaddr;
4115#ifdef CONFIG_X86_32 4106#ifdef CONFIG_X86_32
4116 if (!ioapic_phys) { 4107 if (!ioapic_phys) {
4117 printk(KERN_ERR 4108 printk(KERN_ERR
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
new file mode 100644
index 00000000000..dbf5445727a
--- /dev/null
+++ b/arch/x86/kernel/apic/ipi.c
@@ -0,0 +1,164 @@
1#include <linux/cpumask.h>
2#include <linux/interrupt.h>
3#include <linux/init.h>
4
5#include <linux/mm.h>
6#include <linux/delay.h>
7#include <linux/spinlock.h>
8#include <linux/kernel_stat.h>
9#include <linux/mc146818rtc.h>
10#include <linux/cache.h>
11#include <linux/cpu.h>
12#include <linux/module.h>
13
14#include <asm/smp.h>
15#include <asm/mtrr.h>
16#include <asm/tlbflush.h>
17#include <asm/mmu_context.h>
18#include <asm/apic.h>
19#include <asm/proto.h>
20#include <asm/ipi.h>
21
22void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
23{
24 unsigned long query_cpu;
25 unsigned long flags;
26
27 /*
28 * Hack. The clustered APIC addressing mode doesn't allow us to send
29 * to an arbitrary mask, so I do a unicast to each CPU instead.
30 * - mbligh
31 */
32 local_irq_save(flags);
33 for_each_cpu(query_cpu, mask) {
34 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
35 query_cpu), vector, APIC_DEST_PHYSICAL);
36 }
37 local_irq_restore(flags);
38}
39
40void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
41 int vector)
42{
43 unsigned int this_cpu = smp_processor_id();
44 unsigned int query_cpu;
45 unsigned long flags;
46
47 /* See Hack comment above */
48
49 local_irq_save(flags);
50 for_each_cpu(query_cpu, mask) {
51 if (query_cpu == this_cpu)
52 continue;
53 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
54 query_cpu), vector, APIC_DEST_PHYSICAL);
55 }
56 local_irq_restore(flags);
57}
58
59void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
60 int vector)
61{
62 unsigned long flags;
63 unsigned int query_cpu;
64
65 /*
66 * Hack. The clustered APIC addressing mode doesn't allow us to send
 67 * to an arbitrary mask, so I do a unicast to each CPU instead. This
68 * should be modified to do 1 message per cluster ID - mbligh
69 */
70
71 local_irq_save(flags);
72 for_each_cpu(query_cpu, mask)
73 __default_send_IPI_dest_field(
74 apic->cpu_to_logical_apicid(query_cpu), vector,
75 apic->dest_logical);
76 local_irq_restore(flags);
77}
78
79void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
80 int vector)
81{
82 unsigned long flags;
83 unsigned int query_cpu;
84 unsigned int this_cpu = smp_processor_id();
85
86 /* See Hack comment above */
87
88 local_irq_save(flags);
89 for_each_cpu(query_cpu, mask) {
90 if (query_cpu == this_cpu)
91 continue;
92 __default_send_IPI_dest_field(
93 apic->cpu_to_logical_apicid(query_cpu), vector,
94 apic->dest_logical);
95 }
96 local_irq_restore(flags);
97}
98
99#ifdef CONFIG_X86_32
100
101/*
102 * This is only used on smaller machines.
103 */
104void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
105{
106 unsigned long mask = cpumask_bits(cpumask)[0];
107 unsigned long flags;
108
109 local_irq_save(flags);
110 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
111 __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
112 local_irq_restore(flags);
113}
114
115void default_send_IPI_allbutself(int vector)
116{
117 /*
118 * if there are no other CPUs in the system then we get an APIC send
119 * error if we try to broadcast, thus avoid sending IPIs in this case.
120 */
121 if (!(num_online_cpus() > 1))
122 return;
123
124 __default_local_send_IPI_allbutself(vector);
125}
126
127void default_send_IPI_all(int vector)
128{
129 __default_local_send_IPI_all(vector);
130}
131
132void default_send_IPI_self(int vector)
133{
134 __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
135}
136
137/* must come after the send_IPI functions above for inlining */
138static int convert_apicid_to_cpu(int apic_id)
139{
140 int i;
141
142 for_each_possible_cpu(i) {
143 if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
144 return i;
145 }
146 return -1;
147}
148
149int safe_smp_processor_id(void)
150{
151 int apicid, cpuid;
152
153 if (!boot_cpu_has(X86_FEATURE_APIC))
154 return 0;
155
156 apicid = hard_smp_processor_id();
157 if (apicid == BAD_APICID)
158 return 0;
159
160 cpuid = convert_apicid_to_cpu(apicid);
161
162 return cpuid >= 0 ? cpuid : 0;
163}
164#endif
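
The four mask routines above share one shape: interrupts off, then one unicast per CPU in the mask. A userspace model of default_send_IPI_mask_sequence_phys(), with the ICR write replaced by a printf and an assumed four-entry APIC-ID table:

#include <stdio.h>

#define SKETCH_NR_CPUS 4

/* stand-in for per_cpu(x86_cpu_to_apicid, cpu): */
static const int cpu_to_apicid[SKETCH_NR_CPUS] = { 0, 2, 4, 6 };

static void send_ipi_dest_field(int apicid, int vector)
{
        /* the real helper programs APIC_ICR2/APIC_ICR here */
        printf("IPI vector 0x%02x -> APIC id %d\n", vector, apicid);
}

static void send_ipi_mask_sequence_phys(unsigned long mask, int vector)
{
        int cpu;

        /* one unicast per set bit - physical mode has no multicast */
        for (cpu = 0; cpu < SKETCH_NR_CPUS; cpu++)
                if (mask & (1UL << cpu))
                        send_ipi_dest_field(cpu_to_apicid[cpu], vector);
}

int main(void)
{
        send_ipi_mask_sequence_phys(0x5 /* CPUs 0 and 2 */, 0xfd);
        return 0;
}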
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/apic/nmi.c
index 7228979f1e7..bdfad80c3cf 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -34,7 +34,7 @@
34 34
35#include <asm/mce.h> 35#include <asm/mce.h>
36 36
37#include <mach_traps.h> 37#include <asm/mach_traps.h>
38 38
39int unknown_nmi_panic; 39int unknown_nmi_panic;
40int nmi_watchdog_enabled; 40int nmi_watchdog_enabled;
@@ -61,11 +61,7 @@ static int endflag __initdata;
61 61
62static inline unsigned int get_nmi_count(int cpu) 62static inline unsigned int get_nmi_count(int cpu)
63{ 63{
64#ifdef CONFIG_X86_64 64 return per_cpu(irq_stat, cpu).__nmi_count;
65 return cpu_pda(cpu)->__nmi_count;
66#else
67 return nmi_count(cpu);
68#endif
69} 65}
70 66
71static inline int mce_in_progress(void) 67static inline int mce_in_progress(void)
@@ -82,12 +78,8 @@ static inline int mce_in_progress(void)
82 */ 78 */
83static inline unsigned int get_timer_irqs(int cpu) 79static inline unsigned int get_timer_irqs(int cpu)
84{ 80{
85#ifdef CONFIG_X86_64
86 return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
87#else
88 return per_cpu(irq_stat, cpu).apic_timer_irqs + 81 return per_cpu(irq_stat, cpu).apic_timer_irqs +
89 per_cpu(irq_stat, cpu).irq0_irqs; 82 per_cpu(irq_stat, cpu).irq0_irqs;
90#endif
91} 83}
92 84
93#ifdef CONFIG_SMP 85#ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
new file mode 100644
index 00000000000..ba2fc646553
--- /dev/null
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -0,0 +1,557 @@
1/*
2 * Written by: Patricia Gaughen, IBM Corporation
3 *
4 * Copyright (C) 2002, IBM Corp.
5 * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar
6 *
7 * All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
17 * NON INFRINGEMENT. See the GNU General Public License for more
18 * details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Send feedback to <gone@us.ibm.com>
25 */
26#include <linux/nodemask.h>
27#include <linux/topology.h>
28#include <linux/bootmem.h>
29#include <linux/threads.h>
30#include <linux/cpumask.h>
31#include <linux/kernel.h>
32#include <linux/mmzone.h>
33#include <linux/module.h>
34#include <linux/string.h>
35#include <linux/init.h>
36#include <linux/numa.h>
37#include <linux/smp.h>
38#include <linux/io.h>
39#include <linux/mm.h>
40
41#include <asm/processor.h>
42#include <asm/fixmap.h>
43#include <asm/mpspec.h>
44#include <asm/numaq.h>
45#include <asm/setup.h>
46#include <asm/apic.h>
47#include <asm/e820.h>
48#include <asm/ipi.h>
49
50#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))
51
52int found_numaq;
53
54/*
55 * Have to match translation table entries to main table entries by counter
56 * hence the mpc_record variable .... can't see a less disgusting way of
57 * doing this ....
58 */
59struct mpc_trans {
60 unsigned char mpc_type;
61 unsigned char trans_len;
62 unsigned char trans_type;
63 unsigned char trans_quad;
64 unsigned char trans_global;
65 unsigned char trans_local;
66 unsigned short trans_reserved;
67};
68
69/* x86_quirks member */
70static int mpc_record;
71
72static struct mpc_trans *translation_table[MAX_MPC_ENTRY];
73
74int mp_bus_id_to_node[MAX_MP_BUSSES];
75int mp_bus_id_to_local[MAX_MP_BUSSES];
76int quad_local_to_mp_bus_id[NR_CPUS/4][4];
77
78
79static inline void numaq_register_node(int node, struct sys_cfg_data *scd)
80{
81 struct eachquadmem *eq = scd->eq + node;
82
83 node_set_online(node);
84
85 /* Convert to pages */
86 node_start_pfn[node] =
87 MB_TO_PAGES(eq->hi_shrd_mem_start - eq->priv_mem_size);
88
89 node_end_pfn[node] =
90 MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);
91
92 e820_register_active_regions(node, node_start_pfn[node],
93 node_end_pfn[node]);
94
95 memory_present(node, node_start_pfn[node], node_end_pfn[node]);
96
97 node_remap_size[node] = node_memmap_size_bytes(node,
98 node_start_pfn[node],
99 node_end_pfn[node]);
100}
101
102/*
103 * Function: smp_dump_qct()
104 *
105 * Description: gets memory layout from the quad config table. This
106 * function also updates node_online_map with the nodes (quads) present.
107 */
108static void __init smp_dump_qct(void)
109{
110 struct sys_cfg_data *scd;
111 int node;
112
113 scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR);
114
115 nodes_clear(node_online_map);
116 for_each_node(node) {
117 if (scd->quads_present31_0 & (1 << node))
118 numaq_register_node(node, scd);
119 }
120}
121
122void __cpuinit numaq_tsc_disable(void)
123{
124 if (!found_numaq)
125 return;
126
127 if (num_online_nodes() > 1) {
128 printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
129 setup_clear_cpu_cap(X86_FEATURE_TSC);
130 }
131}
132
133static int __init numaq_pre_time_init(void)
134{
135 numaq_tsc_disable();
136 return 0;
137}
138
139static inline int generate_logical_apicid(int quad, int phys_apicid)
140{
141 return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
142}
143
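A quick check of the encoding: quad number in the high nibble, and in the low nibble either the physical APIC ID shifted left by one, or 1 for the quad's boot processor. Sample inputs only:

#include <stdio.h>

static int generate_logical_apicid(int quad, int phys_apicid)
{
        return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
}

int main(void)
{
        /* quad 2: phys CPU 0 -> 0x21, phys CPU 3 -> 0x26 */
        printf("0x%02x 0x%02x\n",
               generate_logical_apicid(2, 0),
               generate_logical_apicid(2, 3));
        return 0;
}
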
144/* x86_quirks member */
145static int mpc_apic_id(struct mpc_cpu *m)
146{
147 int quad = translation_table[mpc_record]->trans_quad;
148 int logical_apicid = generate_logical_apicid(quad, m->apicid);
149
150 printk(KERN_DEBUG
151 "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
152 m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
153 (m->cpufeature & CPU_MODEL_MASK) >> 4,
154 m->apicver, quad, logical_apicid);
155
156 return logical_apicid;
157}
158
159/* x86_quirks member */
160static void mpc_oem_bus_info(struct mpc_bus *m, char *name)
161{
162 int quad = translation_table[mpc_record]->trans_quad;
163 int local = translation_table[mpc_record]->trans_local;
164
165 mp_bus_id_to_node[m->busid] = quad;
166 mp_bus_id_to_local[m->busid] = local;
167
168 printk(KERN_INFO "Bus #%d is %s (node %d)\n", m->busid, name, quad);
169}
170
171/* x86_quirks member */
172static void mpc_oem_pci_bus(struct mpc_bus *m)
173{
174 int quad = translation_table[mpc_record]->trans_quad;
175 int local = translation_table[mpc_record]->trans_local;
176
177 quad_local_to_mp_bus_id[quad][local] = m->busid;
178}
179
180static void __init MP_translation_info(struct mpc_trans *m)
181{
182 printk(KERN_INFO
183 "Translation: record %d, type %d, quad %d, global %d, local %d\n",
184 mpc_record, m->trans_type, m->trans_quad, m->trans_global,
185 m->trans_local);
186
187 if (mpc_record >= MAX_MPC_ENTRY)
188 printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
189 else
190 translation_table[mpc_record] = m; /* stash this for later */
191
192 if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
193 node_set_online(m->trans_quad);
194}
195
196static int __init mpf_checksum(unsigned char *mp, int len)
197{
198 int sum = 0;
199
200 while (len--)
201 sum += *mp++;
202
203 return sum & 0xFF;
204}
205
206/*
207 * Read/parse the MPC oem tables
208 */
209static void __init
210 smp_read_mpc_oem(struct mpc_oemtable *oemtable, unsigned short oemsize)
211{
212 int count = sizeof(*oemtable); /* the header size */
213 unsigned char *oemptr = ((unsigned char *)oemtable) + count;
214
215 mpc_record = 0;
216 printk(KERN_INFO
217 "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
218
219 if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) {
220 printk(KERN_WARNING
221 "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
222 oemtable->signature[0], oemtable->signature[1],
223 oemtable->signature[2], oemtable->signature[3]);
224 return;
225 }
226
227 if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) {
228 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
229 return;
230 }
231
232 while (count < oemtable->length) {
233 switch (*oemptr) {
234 case MP_TRANSLATION:
235 {
236 struct mpc_trans *m = (void *)oemptr;
237
238 MP_translation_info(m);
239 oemptr += sizeof(*m);
240 count += sizeof(*m);
241 ++mpc_record;
242 break;
243 }
244 default:
245 printk(KERN_WARNING
246 "Unrecognised OEM table entry type! - %d\n",
247 (int)*oemptr);
248 return;
249 }
250 }
251}
252
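The OEM-table walk above is a classic variable-length record scan: verify signature and checksum, then advance a byte cursor by each record's own size until the declared table length is consumed. A minimal model of that loop over a fake two-record table:

#include <stdio.h>

struct rec_sketch {                     /* fake 2-byte record */
        unsigned char type;
        unsigned char len;
};

int main(void)
{
        /* two 2-byte records, following a notional header: */
        unsigned char table[] = { 1, 2, 1, 2 };
        unsigned int count = 0, length = sizeof(table);

        while (count < length) {
                struct rec_sketch *m = (struct rec_sketch *)&table[count];

                printf("record type %d, %d bytes\n", m->type, m->len);
                count += m->len;        /* advance by the record's size */
        }
        return 0;
}
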
253static int __init numaq_setup_ioapic_ids(void)
254{
 255	/* return 1 so the generic IO-APIC ID setup is skipped */
256 return 1;
257}
258
259static struct x86_quirks numaq_x86_quirks __initdata = {
260 .arch_pre_time_init = numaq_pre_time_init,
261 .arch_time_init = NULL,
262 .arch_pre_intr_init = NULL,
263 .arch_memory_setup = NULL,
264 .arch_intr_init = NULL,
265 .arch_trap_init = NULL,
266 .mach_get_smp_config = NULL,
267 .mach_find_smp_config = NULL,
268 .mpc_record = &mpc_record,
269 .mpc_apic_id = mpc_apic_id,
270 .mpc_oem_bus_info = mpc_oem_bus_info,
271 .mpc_oem_pci_bus = mpc_oem_pci_bus,
272 .smp_read_mpc_oem = smp_read_mpc_oem,
273 .setup_ioapic_ids = numaq_setup_ioapic_ids,
274};
275
276static __init void early_check_numaq(void)
277{
278 /*
279 * Find possible boot-time SMP configuration:
280 */
281 early_find_smp_config();
282
283 /*
284 * get boot-time SMP configuration:
285 */
286 if (smp_found_config)
287 early_get_smp_config();
288
289 if (found_numaq)
290 x86_quirks = &numaq_x86_quirks;
291}
292
293int __init get_memcfg_numaq(void)
294{
295 early_check_numaq();
296 if (!found_numaq)
297 return 0;
298 smp_dump_qct();
299
300 return 1;
301}
302
303#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
304
305static inline unsigned int numaq_get_apic_id(unsigned long x)
306{
307 return (x >> 24) & 0x0F;
308}
309
310static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector)
311{
312 default_send_IPI_mask_sequence_logical(mask, vector);
313}
314
315static inline void numaq_send_IPI_allbutself(int vector)
316{
317 default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
318}
319
320static inline void numaq_send_IPI_all(int vector)
321{
322 numaq_send_IPI_mask(cpu_online_mask, vector);
323}
324
325#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8)
326#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa)
327
328/*
329 * Because we use NMIs rather than the INIT-STARTUP sequence to
330 * bootstrap the CPUs, the APIC may be in a weird state. Kick it:
331 */
332static inline void numaq_smp_callin_clear_local_apic(void)
333{
334 clear_local_APIC();
335}
336
337static inline const cpumask_t *numaq_target_cpus(void)
338{
339 return &CPU_MASK_ALL;
340}
341
342static inline unsigned long
343numaq_check_apicid_used(physid_mask_t bitmap, int apicid)
344{
345 return physid_isset(apicid, bitmap);
346}
347
348static inline unsigned long numaq_check_apicid_present(int bit)
349{
350 return physid_isset(bit, phys_cpu_present_map);
351}
352
353static inline int numaq_apic_id_registered(void)
354{
355 return 1;
356}
357
358static inline void numaq_init_apic_ldr(void)
359{
360 /* Already done in NUMA-Q firmware */
361}
362
363static inline void numaq_setup_apic_routing(void)
364{
365 printk(KERN_INFO
366 "Enabling APIC mode: NUMA-Q. Using %d I/O APICs\n",
367 nr_ioapics);
368}
369
370/*
371 * Skip adding the timer int on secondary nodes, which causes
372 * a small but painful rift in the time-space continuum.
373 */
374static inline int numaq_multi_timer_check(int apic, int irq)
375{
376 return apic != 0 && irq == 0;
377}
378
379static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map)
380{
381 /* We don't have a good way to do this yet - hack */
382 return physids_promote(0xFUL);
383}
384
385static inline int numaq_cpu_to_logical_apicid(int cpu)
386{
387 if (cpu >= nr_cpu_ids)
388 return BAD_APICID;
389 return cpu_2_logical_apicid[cpu];
390}
391
392/*
393 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
394 * cpu to APIC ID relation to properly interact with the intelligent
395 * mode of the cluster controller.
396 */
397static inline int numaq_cpu_present_to_apicid(int mps_cpu)
398{
399 if (mps_cpu < 60)
400 return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
401 else
402 return BAD_APICID;
403}
404
405static inline int numaq_apicid_to_node(int logical_apicid)
406{
407 return logical_apicid >> 4;
408}
409
410static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid)
411{
412 int node = numaq_apicid_to_node(logical_apicid);
413 int cpu = __ffs(logical_apicid & 0xf);
414
415 return physid_mask_of_physid(cpu + 4*node);
416}
417
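Putting the mappings above together: an MPS CPU number packs into a quad index (bits 4-7) plus a one-hot CPU bit (bits 0-3), and the node comes straight back out of the high nibble. A standalone round trip with a sample value:

#include <stdio.h>

static int numaq_cpu_present_to_apicid(int mps_cpu)
{
        if (mps_cpu < 60)
                return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
        return -1;                      /* BAD_APICID stand-in */
}

static int numaq_apicid_to_node(int logical_apicid)
{
        return logical_apicid >> 4;
}

int main(void)
{
        int mps_cpu = 5;                /* second CPU of quad 1 */
        int id = numaq_cpu_present_to_apicid(mps_cpu);

        /* prints: mps_cpu 5 -> apicid 0x12, node 1 */
        printf("mps_cpu %d -> apicid 0x%02x, node %d\n",
               mps_cpu, id, numaq_apicid_to_node(id));
        return 0;
}
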
418/* Where the IO area was mapped on multiquad, always 0 otherwise */
419void *xquad_portio;
420
421static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
422{
423 return 1;
424}
425
426/*
427 * We use physical apicids here, not logical, so just return the default
428 * physical broadcast to stop people from breaking us
429 */
430static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
431{
432 return 0x0F;
433}
434
435static inline unsigned int
436numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
437 const struct cpumask *andmask)
438{
439 return 0x0F;
440}
441
442/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
443static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb)
444{
445 return cpuid_apic >> index_msb;
446}
447
448static int
449numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
450{
451 if (strncmp(oem, "IBM NUMA", 8))
452 printk(KERN_ERR "Warning! Not a NUMA-Q system!\n");
453 else
454 found_numaq = 1;
455
456 return found_numaq;
457}
458
459static int probe_numaq(void)
460{
461 /* already know from get_memcfg_numaq() */
462 return found_numaq;
463}
464
465static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
466{
467 /* Careful. Some cpus do not strictly honor the set of cpus
468 * specified in the interrupt destination when using lowest
469 * priority interrupt delivery mode.
470 *
471 * In particular there was a hyperthreading cpu observed to
472 * deliver interrupts to the wrong hyperthread when only one
 473	 * hyperthread was specified in the interrupt destination.
474 */
475 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
476}
477
478static void numaq_setup_portio_remap(void)
479{
480 int num_quads = num_online_nodes();
481
482 if (num_quads <= 1)
483 return;
484
485 printk(KERN_INFO
486 "Remapping cross-quad port I/O for %d quads\n", num_quads);
487
488 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
489
490 printk(KERN_INFO
491 "xquad_portio vaddr 0x%08lx, len %08lx\n",
492 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
493}
494
495struct apic apic_numaq = {
496
497 .name = "NUMAQ",
498 .probe = probe_numaq,
499 .acpi_madt_oem_check = NULL,
500 .apic_id_registered = numaq_apic_id_registered,
501
502 .irq_delivery_mode = dest_LowestPrio,
503 /* physical delivery on LOCAL quad: */
504 .irq_dest_mode = 0,
505
506 .target_cpus = numaq_target_cpus,
507 .disable_esr = 1,
508 .dest_logical = APIC_DEST_LOGICAL,
509 .check_apicid_used = numaq_check_apicid_used,
510 .check_apicid_present = numaq_check_apicid_present,
511
512 .vector_allocation_domain = numaq_vector_allocation_domain,
513 .init_apic_ldr = numaq_init_apic_ldr,
514
515 .ioapic_phys_id_map = numaq_ioapic_phys_id_map,
516 .setup_apic_routing = numaq_setup_apic_routing,
517 .multi_timer_check = numaq_multi_timer_check,
518 .apicid_to_node = numaq_apicid_to_node,
519 .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid,
520 .cpu_present_to_apicid = numaq_cpu_present_to_apicid,
521 .apicid_to_cpu_present = numaq_apicid_to_cpu_present,
522 .setup_portio_remap = numaq_setup_portio_remap,
523 .check_phys_apicid_present = numaq_check_phys_apicid_present,
524 .enable_apic_mode = NULL,
525 .phys_pkg_id = numaq_phys_pkg_id,
526 .mps_oem_check = numaq_mps_oem_check,
527
528 .get_apic_id = numaq_get_apic_id,
529 .set_apic_id = NULL,
530 .apic_id_mask = 0x0F << 24,
531
532 .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid,
533 .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and,
534
535 .send_IPI_mask = numaq_send_IPI_mask,
536 .send_IPI_mask_allbutself = NULL,
537 .send_IPI_allbutself = numaq_send_IPI_allbutself,
538 .send_IPI_all = numaq_send_IPI_all,
539 .send_IPI_self = default_send_IPI_self,
540
541 .wakeup_secondary_cpu = wakeup_secondary_cpu_via_nmi,
542 .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW,
543 .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH,
544
 545	/* We don't do anything here because we use NMIs to boot instead */
546 .wait_for_init_deassert = NULL,
547
548 .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic,
549 .inquire_remote_apic = NULL,
550
551 .read = native_apic_mem_read,
552 .write = native_apic_mem_write,
553 .icr_read = native_apic_icr_read,
554 .icr_write = native_apic_icr_write,
555 .wait_icr_idle = native_apic_wait_icr_idle,
556 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
557};
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
new file mode 100644
index 00000000000..141c99a1c26
--- /dev/null
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -0,0 +1,284 @@
1/*
2 * Default generic APIC driver. This handles up to 8 CPUs.
3 *
4 * Copyright 2003 Andi Kleen, SuSE Labs.
5 * Subject to the GNU Public License, v.2
6 *
7 * Generic x86 APIC driver probe layer.
8 */
9#include <linux/threads.h>
10#include <linux/cpumask.h>
11#include <linux/module.h>
12#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/ctype.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <asm/fixmap.h>
18#include <asm/mpspec.h>
19#include <asm/apicdef.h>
20#include <asm/apic.h>
21#include <asm/setup.h>
22
23#include <linux/threads.h>
24#include <linux/cpumask.h>
25#include <asm/mpspec.h>
26#include <asm/fixmap.h>
27#include <asm/apicdef.h>
28#include <linux/kernel.h>
29#include <linux/string.h>
30#include <linux/smp.h>
31#include <linux/init.h>
32#include <asm/ipi.h>
33
34#include <linux/smp.h>
35#include <linux/init.h>
36#include <linux/interrupt.h>
37#include <asm/acpi.h>
38#include <asm/e820.h>
39#include <asm/setup.h>
40
41#ifdef CONFIG_HOTPLUG_CPU
42#define DEFAULT_SEND_IPI (1)
43#else
44#define DEFAULT_SEND_IPI (0)
45#endif
46
47int no_broadcast = DEFAULT_SEND_IPI;
48
49static __init int no_ipi_broadcast(char *str)
50{
51 get_option(&str, &no_broadcast);
52 pr_info("Using %s mode\n",
53 no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
54 return 1;
55}
56__setup("no_ipi_broadcast=", no_ipi_broadcast);
57
58static int __init print_ipi_mode(void)
59{
60 pr_info("Using IPI %s mode\n",
61 no_broadcast ? "No-Shortcut" : "Shortcut");
62 return 0;
63}
64late_initcall(print_ipi_mode);
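The __setup() hook above parses an integer, so IPI shortcut use can be toggled from the boot line; a hypothetical boot entry (illustrative only):

	linux /vmlinuz root=/dev/sda1 no_ipi_broadcast=1

With that set, the late_initcall prints "Using IPI No-Shortcut mode".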
65
66void default_setup_apic_routing(void)
67{
68#ifdef CONFIG_X86_IO_APIC
69 printk(KERN_INFO
70 "Enabling APIC mode: Flat. Using %d I/O APICs\n",
71 nr_ioapics);
72#endif
73}
74
75static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
76{
77 /*
78 * Careful. Some cpus do not strictly honor the set of cpus
79 * specified in the interrupt destination when using lowest
80 * priority interrupt delivery mode.
81 *
82 * In particular there was a hyperthreading cpu observed to
83 * deliver interrupts to the wrong hyperthread when only one
 84 * hyperthread was specified in the interrupt destination.
85 */
86 *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
87}
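A note on the compound literal above, assuming APIC_ALL_CPUS carries its usual definition of 0xFFu (an assumption, not stated in this patch):

	/*
	 * Only word 0 of the cpumask is set, i.e. logical CPUs 0-7 --
	 * the most that flat logical delivery can address, matching
	 * the 8-CPU limit of this default driver.
	 */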
88
89/* should be called last. */
90static int probe_default(void)
91{
92 return 1;
93}
94
95struct apic apic_default = {
96
97 .name = "default",
98 .probe = probe_default,
99 .acpi_madt_oem_check = NULL,
100 .apic_id_registered = default_apic_id_registered,
101
102 .irq_delivery_mode = dest_LowestPrio,
103 /* logical delivery broadcast to all CPUs: */
104 .irq_dest_mode = 1,
105
106 .target_cpus = default_target_cpus,
107 .disable_esr = 0,
108 .dest_logical = APIC_DEST_LOGICAL,
109 .check_apicid_used = default_check_apicid_used,
110 .check_apicid_present = default_check_apicid_present,
111
112 .vector_allocation_domain = default_vector_allocation_domain,
113 .init_apic_ldr = default_init_apic_ldr,
114
115 .ioapic_phys_id_map = default_ioapic_phys_id_map,
116 .setup_apic_routing = default_setup_apic_routing,
117 .multi_timer_check = NULL,
118 .apicid_to_node = default_apicid_to_node,
119 .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
120 .cpu_present_to_apicid = default_cpu_present_to_apicid,
121 .apicid_to_cpu_present = default_apicid_to_cpu_present,
122 .setup_portio_remap = NULL,
123 .check_phys_apicid_present = default_check_phys_apicid_present,
124 .enable_apic_mode = NULL,
125 .phys_pkg_id = default_phys_pkg_id,
126 .mps_oem_check = NULL,
127
128 .get_apic_id = default_get_apic_id,
129 .set_apic_id = NULL,
130 .apic_id_mask = 0x0F << 24,
131
132 .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
133 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
134
135 .send_IPI_mask = default_send_IPI_mask_logical,
136 .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical,
137 .send_IPI_allbutself = default_send_IPI_allbutself,
138 .send_IPI_all = default_send_IPI_all,
139 .send_IPI_self = default_send_IPI_self,
140
141 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
142 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
143
144 .wait_for_init_deassert = default_wait_for_init_deassert,
145
146 .smp_callin_clear_local_apic = NULL,
147 .inquire_remote_apic = default_inquire_remote_apic,
148
149 .read = native_apic_mem_read,
150 .write = native_apic_mem_write,
151 .icr_read = native_apic_icr_read,
152 .icr_write = native_apic_icr_write,
153 .wait_icr_idle = native_apic_wait_icr_idle,
154 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
155};
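Once a driver is installed in the global 'apic' pointer, generic SMP code dispatches through these ops. A minimal sketch (hypothetical helper, not in this patch):

	static void kick_cpu(int cpu)
	{
		/* cross-CPU reschedule interrupt via the active driver */
		apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
	}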
156
157extern struct apic apic_numaq;
158extern struct apic apic_summit;
159extern struct apic apic_bigsmp;
160extern struct apic apic_es7000;
161extern struct apic apic_es7000_cluster;
162extern struct apic apic_default;
163
164struct apic *apic = &apic_default;
165EXPORT_SYMBOL_GPL(apic);
166
167static struct apic *apic_probe[] __initdata = {
168#ifdef CONFIG_X86_NUMAQ
169 &apic_numaq,
170#endif
171#ifdef CONFIG_X86_SUMMIT
172 &apic_summit,
173#endif
174#ifdef CONFIG_X86_BIGSMP
175 &apic_bigsmp,
176#endif
177#ifdef CONFIG_X86_ES7000
178 &apic_es7000,
179 &apic_es7000_cluster,
180#endif
181 &apic_default, /* must be last */
182 NULL,
183};
184
185static int cmdline_apic __initdata;
186static int __init parse_apic(char *arg)
187{
188 int i;
189
190 if (!arg)
191 return -EINVAL;
192
193 for (i = 0; apic_probe[i]; i++) {
194 if (!strcmp(apic_probe[i]->name, arg)) {
195 apic = apic_probe[i];
196 cmdline_apic = 1;
197 return 0;
198 }
199 }
200
201 /* Parsed again by __setup for debug/verbose */
202 return 0;
203}
204early_param("apic", parse_apic);
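parse_apic() matches the argument against each driver's .name and pins the choice via cmdline_apic, so the later MPS/ACPI OEM checks will not override it. An illustrative boot line:

	linux /vmlinuz root=/dev/sda1 apic=bigsmp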
205
206void __init generic_bigsmp_probe(void)
207{
208#ifdef CONFIG_X86_BIGSMP
209 /*
210 * This routine is used to switch to bigsmp mode when
211 * - There is no apic= option specified by the user
212 * - generic_apic_probe() has chosen apic_default as the sub_arch
213 * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
214 */
215
216 if (!cmdline_apic && apic == &apic_default) {
217 if (apic_bigsmp.probe()) {
218 apic = &apic_bigsmp;
219 printk(KERN_INFO "Overriding APIC driver with %s\n",
220 apic->name);
221 }
222 }
223#endif
224}
225
226void __init generic_apic_probe(void)
227{
228 if (!cmdline_apic) {
229 int i;
230 for (i = 0; apic_probe[i]; i++) {
231 if (apic_probe[i]->probe()) {
232 apic = apic_probe[i];
233 break;
234 }
235 }
236 /* Not visible without early console */
237 if (!apic_probe[i])
238 panic("Didn't find an APIC driver");
239 }
240 printk(KERN_INFO "Using APIC driver %s\n", apic->name);
241}
242
243/* These functions can switch the APIC even after the initial ->probe() */
244
245int __init
246generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
247{
248 int i;
249
250 for (i = 0; apic_probe[i]; ++i) {
251 if (!apic_probe[i]->mps_oem_check)
252 continue;
253 if (!apic_probe[i]->mps_oem_check(mpc, oem, productid))
254 continue;
255
256 if (!cmdline_apic) {
257 apic = apic_probe[i];
258 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
259 apic->name);
260 }
261 return 1;
262 }
263 return 0;
264}
265
266int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
267{
268 int i;
269
270 for (i = 0; apic_probe[i]; ++i) {
271 if (!apic_probe[i]->acpi_madt_oem_check)
272 continue;
273 if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id))
274 continue;
275
276 if (!cmdline_apic) {
277 apic = apic_probe[i];
278 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
279 apic->name);
280 }
281 return 1;
282 }
283 return 0;
284}
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/apic/probe_64.c
index 2bced78b0b8..8d7748efe6a 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -19,22 +19,27 @@
19#include <linux/dmar.h> 19#include <linux/dmar.h>
20 20
21#include <asm/smp.h> 21#include <asm/smp.h>
22#include <asm/apic.h>
22#include <asm/ipi.h> 23#include <asm/ipi.h>
23#include <asm/genapic.h>
24#include <asm/setup.h> 24#include <asm/setup.h>
25 25
26extern struct genapic apic_flat; 26extern struct apic apic_flat;
27extern struct genapic apic_physflat; 27extern struct apic apic_physflat;
28extern struct genapic apic_x2apic_uv_x; 28extern struct apic apic_x2apic_uv_x;
29extern struct genapic apic_x2apic_phys; 29extern struct apic apic_x2apic_phys;
30extern struct genapic apic_x2apic_cluster; 30extern struct apic apic_x2apic_cluster;
31 31
32struct genapic __read_mostly *genapic = &apic_flat; 32struct apic __read_mostly *apic = &apic_flat;
33EXPORT_SYMBOL_GPL(apic);
33 34
34static struct genapic *apic_probe[] __initdata = { 35static struct apic *apic_probe[] __initdata = {
36#ifdef CONFIG_X86_UV
35 &apic_x2apic_uv_x, 37 &apic_x2apic_uv_x,
38#endif
39#ifdef CONFIG_X86_X2APIC
36 &apic_x2apic_phys, 40 &apic_x2apic_phys,
37 &apic_x2apic_cluster, 41 &apic_x2apic_cluster,
42#endif
38 &apic_physflat, 43 &apic_physflat,
39 NULL, 44 NULL,
40}; 45};
@@ -42,39 +47,45 @@ static struct genapic *apic_probe[] __initdata = {
42/* 47/*
43 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. 48 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
44 */ 49 */
45void __init setup_apic_routing(void) 50void __init default_setup_apic_routing(void)
46{ 51{
47 if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) { 52#ifdef CONFIG_X86_X2APIC
48 if (!intr_remapping_enabled) 53 if (x2apic && (apic != &apic_x2apic_phys &&
49 genapic = &apic_flat; 54#ifdef CONFIG_X86_UV
55 apic != &apic_x2apic_uv_x &&
56#endif
57 apic != &apic_x2apic_cluster)) {
58 if (x2apic_phys)
59 apic = &apic_x2apic_phys;
60 else
61 apic = &apic_x2apic_cluster;
62 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
50 } 63 }
64#endif
51 65
52 if (genapic == &apic_flat) { 66 if (apic == &apic_flat) {
53 if (max_physical_apicid >= 8) 67 if (max_physical_apicid >= 8)
54 genapic = &apic_physflat; 68 apic = &apic_physflat;
55 printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name); 69 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
56 } 70 }
57
58 if (x86_quirks->update_genapic)
59 x86_quirks->update_genapic();
60} 71}
61 72
62/* Same for both flat and physical. */ 73/* Same for both flat and physical. */
63 74
64void apic_send_IPI_self(int vector) 75void apic_send_IPI_self(int vector)
65{ 76{
66 __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); 77 __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
67} 78}
68 79
69int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) 80int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
70{ 81{
71 int i; 82 int i;
72 83
73 for (i = 0; apic_probe[i]; ++i) { 84 for (i = 0; apic_probe[i]; ++i) {
74 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { 85 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
75 genapic = apic_probe[i]; 86 apic = apic_probe[i];
76 printk(KERN_INFO "Setting APIC routing to %s.\n", 87 printk(KERN_INFO "Setting APIC routing to %s.\n",
77 genapic->name); 88 apic->name);
78 return 1; 89 return 1;
79 } 90 }
80 } 91 }
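The rewritten default_setup_apic_routing() above reduces to a small decision table; a summary sketch (roughly paraphrasing the hunk, not new logic):

	/*
	 *   x2apic enabled, x2apic_phys set    -> apic_x2apic_phys
	 *   x2apic enabled, x2apic_phys unset  -> apic_x2apic_cluster
	 *   otherwise, max APIC ID <  8        -> apic_flat
	 *   otherwise, max APIC ID >= 8        -> apic_physflat
	 */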
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
new file mode 100644
index 00000000000..aac52fa873f
--- /dev/null
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -0,0 +1,579 @@
1/*
2 * IBM Summit-Specific Code
3 *
4 * Written By: Matthew Dobson, IBM Corporation
5 *
6 * Copyright (c) 2003 IBM Corp.
7 *
8 * All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 * Send feedback to <colpatch@us.ibm.com>
26 *
27 */
28
29#include <linux/mm.h>
30#include <linux/init.h>
31#include <asm/io.h>
32#include <asm/bios_ebda.h>
33
34/*
35 * APIC driver for the IBM "Summit" chipset.
36 */
37#include <linux/threads.h>
38#include <linux/cpumask.h>
39#include <asm/mpspec.h>
40#include <asm/apic.h>
41#include <asm/smp.h>
42#include <asm/fixmap.h>
43#include <asm/apicdef.h>
44#include <asm/ipi.h>
45#include <linux/kernel.h>
46#include <linux/string.h>
47#include <linux/init.h>
48#include <linux/gfp.h>
49#include <linux/smp.h>
50
51static unsigned summit_get_apic_id(unsigned long x)
52{
53 return (x >> 24) & 0xFF;
54}
55
56static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
57{
58 default_send_IPI_mask_sequence_logical(mask, vector);
59}
60
61static void summit_send_IPI_allbutself(int vector)
62{
63 cpumask_t mask = cpu_online_map;
64 cpu_clear(smp_processor_id(), mask);
65
66 if (!cpus_empty(mask))
67 summit_send_IPI_mask(&mask, vector);
68}
69
70static void summit_send_IPI_all(int vector)
71{
72 summit_send_IPI_mask(&cpu_online_map, vector);
73}
74
75#include <asm/tsc.h>
76
77extern int use_cyclone;
78
79#ifdef CONFIG_X86_SUMMIT_NUMA
80static void setup_summit(void);
81#else
82static inline void setup_summit(void) {}
83#endif
84
85static int summit_mps_oem_check(struct mpc_table *mpc, char *oem,
86 char *productid)
87{
88 if (!strncmp(oem, "IBM ENSW", 8) &&
89 (!strncmp(productid, "VIGIL SMP", 9)
90 || !strncmp(productid, "EXA", 3)
91 || !strncmp(productid, "RUTHLESS SMP", 12))){
92 mark_tsc_unstable("Summit based system");
 93 use_cyclone = 1; /* enable cyclone-timer */
94 setup_summit();
95 return 1;
96 }
97 return 0;
98}
99
100/* Hook from generic ACPI tables.c */
101static int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
102{
103 if (!strncmp(oem_id, "IBM", 3) &&
104 (!strncmp(oem_table_id, "SERVIGIL", 8)
105 || !strncmp(oem_table_id, "EXA", 3))){
106 mark_tsc_unstable("Summit based system");
 107 use_cyclone = 1; /* enable cyclone-timer */
108 setup_summit();
109 return 1;
110 }
111 return 0;
112}
113
114struct rio_table_hdr {
115 unsigned char version; /* Version number of this data structure */
116 /* Version 3 adds chassis_num & WP_index */
117 unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
118 unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */
119} __attribute__((packed));
120
121struct scal_detail {
122 unsigned char node_id; /* Scalability Node ID */
123 unsigned long CBAR; /* Address of 1MB register space */
124 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
125 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
126 unsigned char port1node; /* Node ID port connected to: 0xFF = None */
127 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
128 unsigned char port2node; /* Node ID port connected to: 0xFF = None */
129 unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */
130 unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
131} __attribute__((packed));
132
133struct rio_detail {
134 unsigned char node_id; /* RIO Node ID */
135 unsigned long BBAR; /* Address of 1MB register space */
136 unsigned char type; /* Type of device */
137 unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/
138 /* For CYC: Node ID of Twister that owns this CYC */
139 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
140 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
141 unsigned char port1node; /* Node ID port connected to: 0xFF=None */
142 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
143 unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */
144 /* For CYC: 0 */
145 unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */
146 /* = 0 : the XAPIC is not used, ie:*/
147 /* ints fwded to another XAPIC */
148 /* Bits1:7 Reserved */
149 /* For CYC: Bits0:7 Reserved */
150 unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */
151 /* lower slot numbers/PCI bus numbers */
152 /* For CYC: No meaning */
153 unsigned char chassis_num; /* 1 based Chassis number */
154 /* For LookOut WPEGs this field indicates the */
155 /* Expansion Chassis #, enumerated from Boot */
156 /* Node WPEG external port, then Boot Node CYC */
157 /* external port, then Next Vigil chassis WPEG */
158 /* external port, etc. */
159 /* Shared Lookouts have only 1 chassis number (the */
160 /* first one assigned) */
161} __attribute__((packed));
162
163
164typedef enum {
165 CompatTwister = 0, /* Compatibility Twister */
166 AltTwister = 1, /* Alternate Twister of internal 8-way */
167 CompatCyclone = 2, /* Compatibility Cyclone */
168 AltCyclone = 3, /* Alternate Cyclone of internal 8-way */
169 CompatWPEG = 4, /* Compatibility WPEG */
170 AltWPEG = 5, /* Second Planar WPEG */
171 LookOutAWPEG = 6, /* LookOut WPEG */
172 LookOutBWPEG = 7, /* LookOut WPEG */
173} node_type;
174
175static inline int is_WPEG(struct rio_detail *rio){
176 return (rio->type == CompatWPEG || rio->type == AltWPEG ||
177 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
178}
179
180
181/* In clustered mode, the high nibble of APIC ID is a cluster number.
182 * The low nibble is a 4-bit bitmap. */
183#define XAPIC_DEST_CPUS_SHIFT 4
184#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
185#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
186
187#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
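A worked example of the split defined by the masks above:

	/*
	 * Logical APIC ID 0x23 has cluster field 0x20 (high nibble)
	 * and member bitmap 0x3 (low nibble), i.e. the first two
	 * CPUs assigned to cluster 2.
	 */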
188
189static const cpumask_t *summit_target_cpus(void)
190{
191 /* CPU_MASK_ALL (0xff) has undefined behaviour with
192 * dest_LowestPrio mode logical clustered apic interrupt routing
193 * Just start on cpu 0. IRQ balancing will spread load
194 */
195 return &cpumask_of_cpu(0);
196}
197
198static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
199{
200 return 0;
201}
202
203/* we don't use the phys_cpu_present_map to indicate apicid presence */
204static unsigned long summit_check_apicid_present(int bit)
205{
206 return 1;
207}
208
209static void summit_init_apic_ldr(void)
210{
211 unsigned long val, id;
212 int count = 0;
213 u8 my_id = (u8)hard_smp_processor_id();
214 u8 my_cluster = APIC_CLUSTER(my_id);
215#ifdef CONFIG_SMP
216 u8 lid;
217 int i;
218
219 /* Create logical APIC IDs by counting CPUs already in cluster. */
220 for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
221 lid = cpu_2_logical_apicid[i];
222 if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
223 ++count;
224 }
225#endif
 226 /* We only have a 4-bit-wide bitmap in cluster mode. If a deranged
227 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
228 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
229 id = my_cluster | (1UL << count);
230 apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
231 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
232 val |= SET_APIC_LOGICAL_ID(id);
233 apic_write(APIC_LDR, val);
234}
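A worked example of the allocation in summit_init_apic_ldr():

	/*
	 * If cluster 0x20 already holds logical IDs 0x21 and 0x22,
	 * the next CPU counts two occupied slots and gets
	 * id = 0x20 | (1 << 2) = 0x24.
	 */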
235
236static int summit_apic_id_registered(void)
237{
238 return 1;
239}
240
241static void summit_setup_apic_routing(void)
242{
 243 printk(KERN_INFO "Enabling APIC mode: Summit. Using %d I/O APICs\n",
244 nr_ioapics);
245}
246
247static int summit_apicid_to_node(int logical_apicid)
248{
249#ifdef CONFIG_SMP
250 return apicid_2_node[hard_smp_processor_id()];
251#else
252 return 0;
253#endif
254}
255
256/* Mapping from cpu number to logical apicid */
257static inline int summit_cpu_to_logical_apicid(int cpu)
258{
259#ifdef CONFIG_SMP
260 if (cpu >= nr_cpu_ids)
261 return BAD_APICID;
262 return cpu_2_logical_apicid[cpu];
263#else
264 return logical_smp_processor_id();
265#endif
266}
267
268static int summit_cpu_present_to_apicid(int mps_cpu)
269{
270 if (mps_cpu < nr_cpu_ids)
271 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
272 else
273 return BAD_APICID;
274}
275
276static physid_mask_t summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
277{
278 /* For clustered we don't have a good way to do this yet - hack */
279 return physids_promote(0x0F);
280}
281
282static physid_mask_t summit_apicid_to_cpu_present(int apicid)
283{
284 return physid_mask_of_physid(0);
285}
286
287static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
288{
289 return 1;
290}
291
292static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
293{
294 unsigned int round = 0;
295 int cpu, apicid = 0;
296
297 /*
298 * The cpus in the mask must all be on the apic cluster.
299 */
300 for_each_cpu(cpu, cpumask) {
301 int new_apicid = summit_cpu_to_logical_apicid(cpu);
302
303 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 304 printk(KERN_ERR "%s: Not a valid mask!\n", __func__);
305 return BAD_APICID;
306 }
307 apicid |= new_apicid;
308 round++;
309 }
310 return apicid;
311}
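A worked example of the OR-composition above:

	/*
	 * A mask covering the CPUs with logical IDs 0x21 and 0x22
	 * (same cluster) yields 0x21 | 0x22 = 0x23; mixing clusters
	 * trips the APIC_CLUSTER() check and returns BAD_APICID.
	 */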
312
313static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
314 const struct cpumask *andmask)
315{
316 int apicid = summit_cpu_to_logical_apicid(0);
317 cpumask_var_t cpumask;
318
319 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
320 return apicid;
321
322 cpumask_and(cpumask, inmask, andmask);
323 cpumask_and(cpumask, cpumask, cpu_online_mask);
324 apicid = summit_cpu_mask_to_apicid(cpumask);
325
326 free_cpumask_var(cpumask);
327
328 return apicid;
329}
330
331/*
332 * cpuid returns the value latched in the HW at reset, not the APIC ID
333 * register's value. For any box whose BIOS changes APIC IDs, like
334 * clustered APIC systems, we must use hard_smp_processor_id.
335 *
336 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
337 */
338static int summit_phys_pkg_id(int cpuid_apic, int index_msb)
339{
340 return hard_smp_processor_id() >> index_msb;
341}
342
343static int probe_summit(void)
344{
345 /* probed later in mptable/ACPI hooks */
346 return 0;
347}
348
349static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
350{
351 /* Careful. Some cpus do not strictly honor the set of cpus
352 * specified in the interrupt destination when using lowest
353 * priority interrupt delivery mode.
354 *
355 * In particular there was a hyperthreading cpu observed to
356 * deliver interrupts to the wrong hyperthread when only one
 357 * hyperthread was specified in the interrupt destination.
358 */
359 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
360}
361
362#ifdef CONFIG_X86_SUMMIT_NUMA
363static struct rio_table_hdr *rio_table_hdr;
364static struct scal_detail *scal_devs[MAX_NUMNODES];
365static struct rio_detail *rio_devs[MAX_NUMNODES*4];
366
367#ifndef CONFIG_X86_NUMAQ
368static int mp_bus_id_to_node[MAX_MP_BUSSES];
369#endif
370
371static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
372{
373 int twister = 0, node = 0;
374 int i, bus, num_buses;
375
376 for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
377 if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) {
378 twister = rio_devs[i]->owner_id;
379 break;
380 }
381 }
382 if (i == rio_table_hdr->num_rio_dev) {
383 printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__);
384 return last_bus;
385 }
386
387 for (i = 0; i < rio_table_hdr->num_scal_dev; i++) {
388 if (scal_devs[i]->node_id == twister) {
389 node = scal_devs[i]->node_id;
390 break;
391 }
392 }
393 if (i == rio_table_hdr->num_scal_dev) {
394 printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__);
395 return last_bus;
396 }
397
398 switch (rio_devs[wpeg_num]->type) {
399 case CompatWPEG:
400 /*
401 * The Compatibility Winnipeg controls the 2 legacy buses,
402 * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case
403 * a PCI-PCI bridge card is used in either slot: total 5 buses.
404 */
405 num_buses = 5;
406 break;
407 case AltWPEG:
408 /*
409 * The Alternate Winnipeg controls the 2 133MHz buses [1 slot
410 * each], their 2 "extra" buses, the 100MHz bus [2 slots] and
411 * the "extra" buses for each of those slots: total 7 buses.
412 */
413 num_buses = 7;
414 break;
415 case LookOutAWPEG:
416 case LookOutBWPEG:
417 /*
418 * A Lookout Winnipeg controls 3 100MHz buses [2 slots each]
419 * & the "extra" buses for each of those slots: total 9 buses.
420 */
421 num_buses = 9;
422 break;
423 default:
424 printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__);
425 return last_bus;
426 }
427
428 for (bus = last_bus; bus < last_bus + num_buses; bus++)
429 mp_bus_id_to_node[bus] = node;
430 return bus;
431}
432
433static int build_detail_arrays(void)
434{
435 unsigned long ptr;
436 int i, scal_detail_size, rio_detail_size;
437
438 if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) {
439 printk(KERN_WARNING "%s: MAX_NUMNODES too low! Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev);
440 return 0;
441 }
442
443 switch (rio_table_hdr->version) {
444 default:
445 printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version);
446 return 0;
447 case 2:
448 scal_detail_size = 11;
449 rio_detail_size = 13;
450 break;
451 case 3:
452 scal_detail_size = 12;
453 rio_detail_size = 15;
454 break;
455 }
456
457 ptr = (unsigned long)rio_table_hdr + 3;
458 for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size)
459 scal_devs[i] = (struct scal_detail *)ptr;
460
461 for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size)
462 rio_devs[i] = (struct rio_detail *)ptr;
463
464 return 1;
465}
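The pointer arithmetic above follows from the packed structs defined earlier:

	/*
	 * The table header is 3 bytes (version, num_scal_dev,
	 * num_rio_dev), hence the "+ 3"; scal and rio detail records
	 * then follow back to back: 11/13 bytes each in a v2 table,
	 * 12/15 bytes in a v3 table (v3 adds chassis_num and WP_index).
	 */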
466
467void setup_summit(void)
468{
469 unsigned long ptr;
470 unsigned short offset;
471 int i, next_wpeg, next_bus = 0;
472
473 /* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */
474 ptr = get_bios_ebda();
475 ptr = (unsigned long)phys_to_virt(ptr);
476
477 rio_table_hdr = NULL;
478 offset = 0x180;
479 while (offset) {
480 /* The block id is stored in the 2nd word */
481 if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) {
482 /* set the pointer past the offset & block id */
483 rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
484 break;
485 }
486 /* The next offset is stored in the 1st word. 0 means no more */
487 offset = *((unsigned short *)(ptr + offset));
488 }
489 if (!rio_table_hdr) {
490 printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__);
491 return;
492 }
493
494 if (!build_detail_arrays())
495 return;
496
497 /* The first Winnipeg we're looking for has an index of 0 */
498 next_wpeg = 0;
499 do {
500 for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
501 if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) {
502 /* It's the Winnipeg we're looking for! */
503 next_bus = setup_pci_node_map_for_wpeg(i, next_bus);
504 next_wpeg++;
505 break;
506 }
507 }
508 /*
509 * If we go through all Rio devices and don't find one with
510 * the next index, it means we've found all the Winnipegs,
511 * and thus all the PCI buses.
512 */
513 if (i == rio_table_hdr->num_rio_dev)
514 next_wpeg = 0;
515 } while (next_wpeg != 0);
516}
517#endif
518
519struct apic apic_summit = {
520
521 .name = "summit",
522 .probe = probe_summit,
523 .acpi_madt_oem_check = summit_acpi_madt_oem_check,
524 .apic_id_registered = summit_apic_id_registered,
525
526 .irq_delivery_mode = dest_LowestPrio,
527 /* logical delivery broadcast to all CPUs: */
528 .irq_dest_mode = 1,
529
530 .target_cpus = summit_target_cpus,
531 .disable_esr = 1,
532 .dest_logical = APIC_DEST_LOGICAL,
533 .check_apicid_used = summit_check_apicid_used,
534 .check_apicid_present = summit_check_apicid_present,
535
536 .vector_allocation_domain = summit_vector_allocation_domain,
537 .init_apic_ldr = summit_init_apic_ldr,
538
539 .ioapic_phys_id_map = summit_ioapic_phys_id_map,
540 .setup_apic_routing = summit_setup_apic_routing,
541 .multi_timer_check = NULL,
542 .apicid_to_node = summit_apicid_to_node,
543 .cpu_to_logical_apicid = summit_cpu_to_logical_apicid,
544 .cpu_present_to_apicid = summit_cpu_present_to_apicid,
545 .apicid_to_cpu_present = summit_apicid_to_cpu_present,
546 .setup_portio_remap = NULL,
547 .check_phys_apicid_present = summit_check_phys_apicid_present,
548 .enable_apic_mode = NULL,
549 .phys_pkg_id = summit_phys_pkg_id,
550 .mps_oem_check = summit_mps_oem_check,
551
552 .get_apic_id = summit_get_apic_id,
553 .set_apic_id = NULL,
554 .apic_id_mask = 0xFF << 24,
555
556 .cpu_mask_to_apicid = summit_cpu_mask_to_apicid,
557 .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and,
558
559 .send_IPI_mask = summit_send_IPI_mask,
560 .send_IPI_mask_allbutself = NULL,
561 .send_IPI_allbutself = summit_send_IPI_allbutself,
562 .send_IPI_all = summit_send_IPI_all,
563 .send_IPI_self = default_send_IPI_self,
564
565 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
566 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
567
568 .wait_for_init_deassert = default_wait_for_init_deassert,
569
570 .smp_callin_clear_local_apic = NULL,
571 .inquire_remote_apic = default_inquire_remote_apic,
572
573 .read = native_apic_mem_read,
574 .write = native_apic_mem_write,
575 .icr_read = native_apic_icr_read,
576 .icr_write = native_apic_icr_write,
577 .wait_icr_idle = native_apic_wait_icr_idle,
578 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
579};
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 6ce497cc372..8fb87b6dd63 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -7,17 +7,14 @@
7#include <linux/dmar.h> 7#include <linux/dmar.h>
8 8
9#include <asm/smp.h> 9#include <asm/smp.h>
10#include <asm/apic.h>
10#include <asm/ipi.h> 11#include <asm/ipi.h>
11#include <asm/genapic.h>
12 12
13DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); 13DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
14 14
15static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 15static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
16{ 16{
17 if (cpu_has_x2apic) 17 return x2apic_enabled();
18 return 1;
19
20 return 0;
21} 18}
22 19
23/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 20/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
@@ -36,8 +33,8 @@ static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
36 cpumask_set_cpu(cpu, retmask); 33 cpumask_set_cpu(cpu, retmask);
37} 34}
38 35
39static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, 36static void
40 unsigned int dest) 37 __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
41{ 38{
42 unsigned long cfg; 39 unsigned long cfg;
43 40
@@ -46,7 +43,7 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
46 /* 43 /*
47 * send the IPI. 44 * send the IPI.
48 */ 45 */
49 x2apic_icr_write(cfg, apicid); 46 native_x2apic_icr_write(cfg, apicid);
50} 47}
51 48
52/* 49/*
@@ -57,45 +54,50 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
57 */ 54 */
58static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) 55static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
59{ 56{
60 unsigned long flags;
61 unsigned long query_cpu; 57 unsigned long query_cpu;
58 unsigned long flags;
62 59
63 local_irq_save(flags); 60 local_irq_save(flags);
64 for_each_cpu(query_cpu, mask) 61 for_each_cpu(query_cpu, mask) {
65 __x2apic_send_IPI_dest( 62 __x2apic_send_IPI_dest(
66 per_cpu(x86_cpu_to_logical_apicid, query_cpu), 63 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
67 vector, APIC_DEST_LOGICAL); 64 vector, apic->dest_logical);
65 }
68 local_irq_restore(flags); 66 local_irq_restore(flags);
69} 67}
70 68
71static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, 69static void
72 int vector) 70 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
73{ 71{
74 unsigned long flags;
75 unsigned long query_cpu;
76 unsigned long this_cpu = smp_processor_id(); 72 unsigned long this_cpu = smp_processor_id();
73 unsigned long query_cpu;
74 unsigned long flags;
77 75
78 local_irq_save(flags); 76 local_irq_save(flags);
79 for_each_cpu(query_cpu, mask) 77 for_each_cpu(query_cpu, mask) {
80 if (query_cpu != this_cpu) 78 if (query_cpu == this_cpu)
81 __x2apic_send_IPI_dest( 79 continue;
80 __x2apic_send_IPI_dest(
82 per_cpu(x86_cpu_to_logical_apicid, query_cpu), 81 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
83 vector, APIC_DEST_LOGICAL); 82 vector, apic->dest_logical);
83 }
84 local_irq_restore(flags); 84 local_irq_restore(flags);
85} 85}
86 86
87static void x2apic_send_IPI_allbutself(int vector) 87static void x2apic_send_IPI_allbutself(int vector)
88{ 88{
89 unsigned long flags;
90 unsigned long query_cpu;
91 unsigned long this_cpu = smp_processor_id(); 89 unsigned long this_cpu = smp_processor_id();
90 unsigned long query_cpu;
91 unsigned long flags;
92 92
93 local_irq_save(flags); 93 local_irq_save(flags);
94 for_each_online_cpu(query_cpu) 94 for_each_online_cpu(query_cpu) {
95 if (query_cpu != this_cpu) 95 if (query_cpu == this_cpu)
96 __x2apic_send_IPI_dest( 96 continue;
97 __x2apic_send_IPI_dest(
97 per_cpu(x86_cpu_to_logical_apicid, query_cpu), 98 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
98 vector, APIC_DEST_LOGICAL); 99 vector, apic->dest_logical);
100 }
99 local_irq_restore(flags); 101 local_irq_restore(flags);
100} 102}
101 103
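Unlike the memory-mapped xAPIC ICR, the x2APIC ICR is a single 64-bit MSR write with the destination in the high half; roughly as this kernel defines it in <asm/apic.h> (a sketch, shown here for context):

	static inline void native_x2apic_icr_write(u32 low, u32 id)
	{
		/* destination in bits 63:32, command in bits 31:0 (MSR 0x830) */
		wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
	}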
@@ -111,21 +113,21 @@ static int x2apic_apic_id_registered(void)
111 113
112static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) 114static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
113{ 115{
114 int cpu;
115
116 /* 116 /*
117 * We're using fixed IRQ delivery, can only return one logical APIC ID. 117 * We're using fixed IRQ delivery, can only return one logical APIC ID.
118 * May as well be the first. 118 * May as well be the first.
119 */ 119 */
120 cpu = cpumask_first(cpumask); 120 int cpu = cpumask_first(cpumask);
121
121 if ((unsigned)cpu < nr_cpu_ids) 122 if ((unsigned)cpu < nr_cpu_ids)
122 return per_cpu(x86_cpu_to_logical_apicid, cpu); 123 return per_cpu(x86_cpu_to_logical_apicid, cpu);
123 else 124 else
124 return BAD_APICID; 125 return BAD_APICID;
125} 126}
126 127
127static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 128static unsigned int
128 const struct cpumask *andmask) 129x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
130 const struct cpumask *andmask)
129{ 131{
130 int cpu; 132 int cpu;
131 133
@@ -133,15 +135,18 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
133 * We're using fixed IRQ delivery, can only return one logical APIC ID. 135 * We're using fixed IRQ delivery, can only return one logical APIC ID.
134 * May as well be the first. 136 * May as well be the first.
135 */ 137 */
136 for_each_cpu_and(cpu, cpumask, andmask) 138 for_each_cpu_and(cpu, cpumask, andmask) {
137 if (cpumask_test_cpu(cpu, cpu_online_mask)) 139 if (cpumask_test_cpu(cpu, cpu_online_mask))
138 break; 140 break;
141 }
142
139 if (cpu < nr_cpu_ids) 143 if (cpu < nr_cpu_ids)
140 return per_cpu(x86_cpu_to_logical_apicid, cpu); 144 return per_cpu(x86_cpu_to_logical_apicid, cpu);
145
141 return BAD_APICID; 146 return BAD_APICID;
142} 147}
143 148
144static unsigned int get_apic_id(unsigned long x) 149static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
145{ 150{
146 unsigned int id; 151 unsigned int id;
147 152
@@ -157,7 +162,7 @@ static unsigned long set_apic_id(unsigned int id)
157 return x; 162 return x;
158} 163}
159 164
160static unsigned int phys_pkg_id(int index_msb) 165static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
161{ 166{
162 return current_cpu_data.initial_apicid >> index_msb; 167 return current_cpu_data.initial_apicid >> index_msb;
163} 168}
@@ -172,27 +177,63 @@ static void init_x2apic_ldr(void)
172 int cpu = smp_processor_id(); 177 int cpu = smp_processor_id();
173 178
174 per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR); 179 per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
175 return; 180}
176} 181
177 182struct apic apic_x2apic_cluster = {
178struct genapic apic_x2apic_cluster = { 183
179 .name = "cluster x2apic", 184 .name = "cluster x2apic",
180 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, 185 .probe = NULL,
181 .int_delivery_mode = dest_LowestPrio, 186 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
182 .int_dest_mode = (APIC_DEST_LOGICAL != 0), 187 .apic_id_registered = x2apic_apic_id_registered,
183 .target_cpus = x2apic_target_cpus, 188
184 .vector_allocation_domain = x2apic_vector_allocation_domain, 189 .irq_delivery_mode = dest_LowestPrio,
185 .apic_id_registered = x2apic_apic_id_registered, 190 .irq_dest_mode = 1, /* logical */
186 .init_apic_ldr = init_x2apic_ldr, 191
187 .send_IPI_all = x2apic_send_IPI_all, 192 .target_cpus = x2apic_target_cpus,
188 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 193 .disable_esr = 0,
189 .send_IPI_mask = x2apic_send_IPI_mask, 194 .dest_logical = APIC_DEST_LOGICAL,
190 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, 195 .check_apicid_used = NULL,
191 .send_IPI_self = x2apic_send_IPI_self, 196 .check_apicid_present = NULL,
192 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 197
193 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, 198 .vector_allocation_domain = x2apic_vector_allocation_domain,
194 .phys_pkg_id = phys_pkg_id, 199 .init_apic_ldr = init_x2apic_ldr,
195 .get_apic_id = get_apic_id, 200
196 .set_apic_id = set_apic_id, 201 .ioapic_phys_id_map = NULL,
197 .apic_id_mask = (0xFFFFFFFFu), 202 .setup_apic_routing = NULL,
203 .multi_timer_check = NULL,
204 .apicid_to_node = NULL,
205 .cpu_to_logical_apicid = NULL,
206 .cpu_present_to_apicid = default_cpu_present_to_apicid,
207 .apicid_to_cpu_present = NULL,
208 .setup_portio_remap = NULL,
209 .check_phys_apicid_present = default_check_phys_apicid_present,
210 .enable_apic_mode = NULL,
211 .phys_pkg_id = x2apic_cluster_phys_pkg_id,
212 .mps_oem_check = NULL,
213
214 .get_apic_id = x2apic_cluster_phys_get_apic_id,
215 .set_apic_id = set_apic_id,
216 .apic_id_mask = 0xFFFFFFFFu,
217
218 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
219 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
220
221 .send_IPI_mask = x2apic_send_IPI_mask,
222 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
223 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
224 .send_IPI_all = x2apic_send_IPI_all,
225 .send_IPI_self = x2apic_send_IPI_self,
226
227 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
228 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
229 .wait_for_init_deassert = NULL,
230 .smp_callin_clear_local_apic = NULL,
231 .inquire_remote_apic = NULL,
232
233 .read = native_apic_msr_read,
234 .write = native_apic_msr_write,
235 .icr_read = native_x2apic_icr_read,
236 .icr_write = native_x2apic_icr_write,
237 .wait_icr_idle = native_x2apic_wait_icr_idle,
238 .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
198}; 239};
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 21bcc0e098b..23625b9f98b 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -7,10 +7,10 @@
7#include <linux/dmar.h> 7#include <linux/dmar.h>
8 8
9#include <asm/smp.h> 9#include <asm/smp.h>
10#include <asm/apic.h>
10#include <asm/ipi.h> 11#include <asm/ipi.h>
11#include <asm/genapic.h>
12 12
13static int x2apic_phys; 13int x2apic_phys;
14 14
15static int set_x2apic_phys_mode(char *arg) 15static int set_x2apic_phys_mode(char *arg)
16{ 16{
@@ -21,10 +21,10 @@ early_param("x2apic_phys", set_x2apic_phys_mode);
21 21
22static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 22static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
23{ 23{
24 if (cpu_has_x2apic && x2apic_phys) 24 if (x2apic_phys)
25 return 1; 25 return x2apic_enabled();
26 26 else
27 return 0; 27 return 0;
28} 28}
29 29
30/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 30/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
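With x2apic_phys now a global, probe_64.c's default_setup_apic_routing() can honor it; the mode is requested from the boot line, e.g. (illustrative):

	linux /vmlinuz root=/dev/sda1 x2apic_phys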
@@ -50,13 +50,13 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
50 /* 50 /*
51 * send the IPI. 51 * send the IPI.
52 */ 52 */
53 x2apic_icr_write(cfg, apicid); 53 native_x2apic_icr_write(cfg, apicid);
54} 54}
55 55
56static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) 56static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
57{ 57{
58 unsigned long flags;
59 unsigned long query_cpu; 58 unsigned long query_cpu;
59 unsigned long flags;
60 60
61 local_irq_save(flags); 61 local_irq_save(flags);
62 for_each_cpu(query_cpu, mask) { 62 for_each_cpu(query_cpu, mask) {
@@ -66,12 +66,12 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
66 local_irq_restore(flags); 66 local_irq_restore(flags);
67} 67}
68 68
69static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, 69static void
70 int vector) 70 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
71{ 71{
72 unsigned long flags;
73 unsigned long query_cpu;
74 unsigned long this_cpu = smp_processor_id(); 72 unsigned long this_cpu = smp_processor_id();
73 unsigned long query_cpu;
74 unsigned long flags;
75 75
76 local_irq_save(flags); 76 local_irq_save(flags);
77 for_each_cpu(query_cpu, mask) { 77 for_each_cpu(query_cpu, mask) {
@@ -85,16 +85,17 @@ static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
85 85
86static void x2apic_send_IPI_allbutself(int vector) 86static void x2apic_send_IPI_allbutself(int vector)
87{ 87{
88 unsigned long flags;
89 unsigned long query_cpu;
90 unsigned long this_cpu = smp_processor_id(); 88 unsigned long this_cpu = smp_processor_id();
89 unsigned long query_cpu;
90 unsigned long flags;
91 91
92 local_irq_save(flags); 92 local_irq_save(flags);
93 for_each_online_cpu(query_cpu) 93 for_each_online_cpu(query_cpu) {
94 if (query_cpu != this_cpu) 94 if (query_cpu == this_cpu)
95 __x2apic_send_IPI_dest( 95 continue;
96 per_cpu(x86_cpu_to_apicid, query_cpu), 96 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
97 vector, APIC_DEST_PHYSICAL); 97 vector, APIC_DEST_PHYSICAL);
98 }
98 local_irq_restore(flags); 99 local_irq_restore(flags);
99} 100}
100 101
@@ -110,21 +111,21 @@ static int x2apic_apic_id_registered(void)
110 111
111static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) 112static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
112{ 113{
113 int cpu;
114
115 /* 114 /*
116 * We're using fixed IRQ delivery, can only return one phys APIC ID. 115 * We're using fixed IRQ delivery, can only return one phys APIC ID.
117 * May as well be the first. 116 * May as well be the first.
118 */ 117 */
119 cpu = cpumask_first(cpumask); 118 int cpu = cpumask_first(cpumask);
119
120 if ((unsigned)cpu < nr_cpu_ids) 120 if ((unsigned)cpu < nr_cpu_ids)
121 return per_cpu(x86_cpu_to_apicid, cpu); 121 return per_cpu(x86_cpu_to_apicid, cpu);
122 else 122 else
123 return BAD_APICID; 123 return BAD_APICID;
124} 124}
125 125
126static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 126static unsigned int
127 const struct cpumask *andmask) 127x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
128 const struct cpumask *andmask)
128{ 129{
129 int cpu; 130 int cpu;
130 131
@@ -132,31 +133,28 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
132 * We're using fixed IRQ delivery, can only return one phys APIC ID. 133 * We're using fixed IRQ delivery, can only return one phys APIC ID.
133 * May as well be the first. 134 * May as well be the first.
134 */ 135 */
135 for_each_cpu_and(cpu, cpumask, andmask) 136 for_each_cpu_and(cpu, cpumask, andmask) {
136 if (cpumask_test_cpu(cpu, cpu_online_mask)) 137 if (cpumask_test_cpu(cpu, cpu_online_mask))
137 break; 138 break;
139 }
140
138 if (cpu < nr_cpu_ids) 141 if (cpu < nr_cpu_ids)
139 return per_cpu(x86_cpu_to_apicid, cpu); 142 return per_cpu(x86_cpu_to_apicid, cpu);
143
140 return BAD_APICID; 144 return BAD_APICID;
141} 145}
142 146
143static unsigned int get_apic_id(unsigned long x) 147static unsigned int x2apic_phys_get_apic_id(unsigned long x)
144{ 148{
145 unsigned int id; 149 return x;
146
147 id = x;
148 return id;
149} 150}
150 151
151static unsigned long set_apic_id(unsigned int id) 152static unsigned long set_apic_id(unsigned int id)
152{ 153{
153 unsigned long x; 154 return id;
154
155 x = id;
156 return x;
157} 155}
158 156
159static unsigned int phys_pkg_id(int index_msb) 157static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
160{ 158{
161 return current_cpu_data.initial_apicid >> index_msb; 159 return current_cpu_data.initial_apicid >> index_msb;
162} 160}
@@ -168,27 +166,63 @@ static void x2apic_send_IPI_self(int vector)
168 166
169static void init_x2apic_ldr(void) 167static void init_x2apic_ldr(void)
170{ 168{
171 return; 169}
172} 170
173 171struct apic apic_x2apic_phys = {
174struct genapic apic_x2apic_phys = { 172
175 .name = "physical x2apic", 173 .name = "physical x2apic",
176 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, 174 .probe = NULL,
177 .int_delivery_mode = dest_Fixed, 175 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
178 .int_dest_mode = (APIC_DEST_PHYSICAL != 0), 176 .apic_id_registered = x2apic_apic_id_registered,
179 .target_cpus = x2apic_target_cpus, 177
180 .vector_allocation_domain = x2apic_vector_allocation_domain, 178 .irq_delivery_mode = dest_Fixed,
181 .apic_id_registered = x2apic_apic_id_registered, 179 .irq_dest_mode = 0, /* physical */
182 .init_apic_ldr = init_x2apic_ldr, 180
183 .send_IPI_all = x2apic_send_IPI_all, 181 .target_cpus = x2apic_target_cpus,
184 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 182 .disable_esr = 0,
185 .send_IPI_mask = x2apic_send_IPI_mask, 183 .dest_logical = 0,
186 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, 184 .check_apicid_used = NULL,
187 .send_IPI_self = x2apic_send_IPI_self, 185 .check_apicid_present = NULL,
188 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 186
189 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, 187 .vector_allocation_domain = x2apic_vector_allocation_domain,
190 .phys_pkg_id = phys_pkg_id, 188 .init_apic_ldr = init_x2apic_ldr,
191 .get_apic_id = get_apic_id, 189
192 .set_apic_id = set_apic_id, 190 .ioapic_phys_id_map = NULL,
193 .apic_id_mask = (0xFFFFFFFFu), 191 .setup_apic_routing = NULL,
192 .multi_timer_check = NULL,
193 .apicid_to_node = NULL,
194 .cpu_to_logical_apicid = NULL,
195 .cpu_present_to_apicid = default_cpu_present_to_apicid,
196 .apicid_to_cpu_present = NULL,
197 .setup_portio_remap = NULL,
198 .check_phys_apicid_present = default_check_phys_apicid_present,
199 .enable_apic_mode = NULL,
200 .phys_pkg_id = x2apic_phys_pkg_id,
201 .mps_oem_check = NULL,
202
203 .get_apic_id = x2apic_phys_get_apic_id,
204 .set_apic_id = set_apic_id,
205 .apic_id_mask = 0xFFFFFFFFu,
206
207 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
208 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
209
210 .send_IPI_mask = x2apic_send_IPI_mask,
211 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
212 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
213 .send_IPI_all = x2apic_send_IPI_all,
214 .send_IPI_self = x2apic_send_IPI_self,
215
216 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
217 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
218 .wait_for_init_deassert = NULL,
219 .smp_callin_clear_local_apic = NULL,
220 .inquire_remote_apic = NULL,
221
222 .read = native_apic_msr_read,
223 .write = native_apic_msr_write,
224 .icr_read = native_x2apic_icr_read,
225 .icr_write = native_x2apic_icr_write,
226 .wait_icr_idle = native_x2apic_wait_icr_idle,
227 .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
194}; 228};
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b193e082f6c..1bd6da1f8fa 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -7,27 +7,28 @@
7 * 7 *
8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10
11#include <linux/kernel.h>
12#include <linux/threads.h>
13#include <linux/cpu.h>
14#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11#include <linux/hardirq.h>
12#include <linux/proc_fs.h>
13#include <linux/threads.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
15#include <linux/string.h> 16#include <linux/string.h>
16#include <linux/ctype.h> 17#include <linux/ctype.h>
17#include <linux/init.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/module.h>
20#include <linux/hardirq.h>
21#include <linux/timer.h> 19#include <linux/timer.h>
22#include <linux/proc_fs.h> 20#include <linux/cpu.h>
23#include <asm/current.h> 21#include <linux/init.h>
24#include <asm/smp.h> 22
25#include <asm/ipi.h>
26#include <asm/genapic.h>
27#include <asm/pgtable.h>
28#include <asm/uv/uv_mmrs.h> 23#include <asm/uv/uv_mmrs.h>
29#include <asm/uv/uv_hub.h> 24#include <asm/uv/uv_hub.h>
25#include <asm/current.h>
26#include <asm/pgtable.h>
30#include <asm/uv/bios.h> 27#include <asm/uv/bios.h>
28#include <asm/uv/uv.h>
29#include <asm/apic.h>
30#include <asm/ipi.h>
31#include <asm/smp.h>
31 32
32DEFINE_PER_CPU(int, x2apic_extra_bits); 33DEFINE_PER_CPU(int, x2apic_extra_bits);
33 34
@@ -90,39 +91,43 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
90 cpumask_set_cpu(cpu, retmask); 91 cpumask_set_cpu(cpu, retmask);
91} 92}
92 93
93int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) 94static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
94{ 95{
96#ifdef CONFIG_SMP
95 unsigned long val; 97 unsigned long val;
96 int pnode; 98 int pnode;
97 99
98 pnode = uv_apicid_to_pnode(phys_apicid); 100 pnode = uv_apicid_to_pnode(phys_apicid);
99 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 101 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
100 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 102 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
101 (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 103 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
102 APIC_DM_INIT; 104 APIC_DM_INIT;
103 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 105 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
104 mdelay(10); 106 mdelay(10);
105 107
106 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 108 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
107 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 109 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
108 (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 110 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
109 APIC_DM_STARTUP; 111 APIC_DM_STARTUP;
110 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 112 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
113
114 atomic_set(&init_deasserted, 1);
115#endif
111 return 0; 116 return 0;
112} 117}
113 118
114static void uv_send_IPI_one(int cpu, int vector) 119static void uv_send_IPI_one(int cpu, int vector)
115{ 120{
116 unsigned long val, apicid, lapicid; 121 unsigned long val, apicid;
117 int pnode; 122 int pnode;
118 123
119 apicid = per_cpu(x86_cpu_to_apicid, cpu); 124 apicid = per_cpu(x86_cpu_to_apicid, cpu);
120 lapicid = apicid & 0x3f; /* ZZZ macro needed */
121 pnode = uv_apicid_to_pnode(apicid); 125 pnode = uv_apicid_to_pnode(apicid);
122 val = 126
123 (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid << 127 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
124 UVH_IPI_INT_APIC_ID_SHFT) | 128 (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
125 (vector << UVH_IPI_INT_VECTOR_SHFT); 129 (vector << UVH_IPI_INT_VECTOR_SHFT);
130
126 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 131 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
127} 132}
128 133
@@ -136,22 +141,24 @@ static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
136 141
137static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) 142static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
138{ 143{
139 unsigned int cpu;
140 unsigned int this_cpu = smp_processor_id(); 144 unsigned int this_cpu = smp_processor_id();
145 unsigned int cpu;
141 146
142 for_each_cpu(cpu, mask) 147 for_each_cpu(cpu, mask) {
143 if (cpu != this_cpu) 148 if (cpu != this_cpu)
144 uv_send_IPI_one(cpu, vector); 149 uv_send_IPI_one(cpu, vector);
150 }
145} 151}
146 152
147static void uv_send_IPI_allbutself(int vector) 153static void uv_send_IPI_allbutself(int vector)
148{ 154{
149 unsigned int cpu;
150 unsigned int this_cpu = smp_processor_id(); 155 unsigned int this_cpu = smp_processor_id();
156 unsigned int cpu;
151 157
152 for_each_online_cpu(cpu) 158 for_each_online_cpu(cpu) {
153 if (cpu != this_cpu) 159 if (cpu != this_cpu)
154 uv_send_IPI_one(cpu, vector); 160 uv_send_IPI_one(cpu, vector);
161 }
155} 162}
156 163
157static void uv_send_IPI_all(int vector) 164static void uv_send_IPI_all(int vector)
@@ -170,21 +177,21 @@ static void uv_init_apic_ldr(void)
170 177
171static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) 178static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
172{ 179{
173 int cpu;
174
175 /* 180 /*
176 * We're using fixed IRQ delivery, can only return one phys APIC ID. 181 * We're using fixed IRQ delivery, can only return one phys APIC ID.
177 * May as well be the first. 182 * May as well be the first.
178 */ 183 */
179 cpu = cpumask_first(cpumask); 184 int cpu = cpumask_first(cpumask);
185
180 if ((unsigned)cpu < nr_cpu_ids) 186 if ((unsigned)cpu < nr_cpu_ids)
181 return per_cpu(x86_cpu_to_apicid, cpu); 187 return per_cpu(x86_cpu_to_apicid, cpu);
182 else 188 else
183 return BAD_APICID; 189 return BAD_APICID;
184} 190}
185 191
186static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 192static unsigned int
187 const struct cpumask *andmask) 193uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
194 const struct cpumask *andmask)
188{ 195{
189 int cpu; 196 int cpu;
190 197
@@ -192,15 +199,17 @@ static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
192 * We're using fixed IRQ delivery, can only return one phys APIC ID. 199 * We're using fixed IRQ delivery, can only return one phys APIC ID.
193 * May as well be the first. 200 * May as well be the first.
194 */ 201 */
195 for_each_cpu_and(cpu, cpumask, andmask) 202 for_each_cpu_and(cpu, cpumask, andmask) {
196 if (cpumask_test_cpu(cpu, cpu_online_mask)) 203 if (cpumask_test_cpu(cpu, cpu_online_mask))
197 break; 204 break;
205 }
198 if (cpu < nr_cpu_ids) 206 if (cpu < nr_cpu_ids)
199 return per_cpu(x86_cpu_to_apicid, cpu); 207 return per_cpu(x86_cpu_to_apicid, cpu);
208
200 return BAD_APICID; 209 return BAD_APICID;
201} 210}
202 211
203static unsigned int get_apic_id(unsigned long x) 212static unsigned int x2apic_get_apic_id(unsigned long x)
204{ 213{
205 unsigned int id; 214 unsigned int id;
206 215
@@ -222,10 +231,10 @@ static unsigned long set_apic_id(unsigned int id)
222static unsigned int uv_read_apic_id(void) 231static unsigned int uv_read_apic_id(void)
223{ 232{
224 233
225 return get_apic_id(apic_read(APIC_ID)); 234 return x2apic_get_apic_id(apic_read(APIC_ID));
226} 235}
227 236
228static unsigned int phys_pkg_id(int index_msb) 237static int uv_phys_pkg_id(int initial_apicid, int index_msb)
229{ 238{
230 return uv_read_apic_id() >> index_msb; 239 return uv_read_apic_id() >> index_msb;
231} 240}
@@ -235,26 +244,64 @@ static void uv_send_IPI_self(int vector)
235 apic_write(APIC_SELF_IPI, vector); 244 apic_write(APIC_SELF_IPI, vector);
236} 245}
237 246
238struct genapic apic_x2apic_uv_x = { 247struct apic apic_x2apic_uv_x = {
239 .name = "UV large system", 248
240 .acpi_madt_oem_check = uv_acpi_madt_oem_check, 249 .name = "UV large system",
241 .int_delivery_mode = dest_Fixed, 250 .probe = NULL,
242 .int_dest_mode = (APIC_DEST_PHYSICAL != 0), 251 .acpi_madt_oem_check = uv_acpi_madt_oem_check,
243 .target_cpus = uv_target_cpus, 252 .apic_id_registered = uv_apic_id_registered,
244 .vector_allocation_domain = uv_vector_allocation_domain, 253
245 .apic_id_registered = uv_apic_id_registered, 254 .irq_delivery_mode = dest_Fixed,
246 .init_apic_ldr = uv_init_apic_ldr, 255 .irq_dest_mode = 1, /* logical */
247 .send_IPI_all = uv_send_IPI_all, 256
248 .send_IPI_allbutself = uv_send_IPI_allbutself, 257 .target_cpus = uv_target_cpus,
249 .send_IPI_mask = uv_send_IPI_mask, 258 .disable_esr = 0,
250 .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, 259 .dest_logical = APIC_DEST_LOGICAL,
251 .send_IPI_self = uv_send_IPI_self, 260 .check_apicid_used = NULL,
252 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, 261 .check_apicid_present = NULL,
253 .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, 262
254 .phys_pkg_id = phys_pkg_id, 263 .vector_allocation_domain = uv_vector_allocation_domain,
255 .get_apic_id = get_apic_id, 264 .init_apic_ldr = uv_init_apic_ldr,
256 .set_apic_id = set_apic_id, 265
257 .apic_id_mask = (0xFFFFFFFFu), 266 .ioapic_phys_id_map = NULL,
267 .setup_apic_routing = NULL,
268 .multi_timer_check = NULL,
269 .apicid_to_node = NULL,
270 .cpu_to_logical_apicid = NULL,
271 .cpu_present_to_apicid = default_cpu_present_to_apicid,
272 .apicid_to_cpu_present = NULL,
273 .setup_portio_remap = NULL,
274 .check_phys_apicid_present = default_check_phys_apicid_present,
275 .enable_apic_mode = NULL,
276 .phys_pkg_id = uv_phys_pkg_id,
277 .mps_oem_check = NULL,
278
279 .get_apic_id = x2apic_get_apic_id,
280 .set_apic_id = set_apic_id,
281 .apic_id_mask = 0xFFFFFFFFu,
282
283 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
284 .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
285
286 .send_IPI_mask = uv_send_IPI_mask,
287 .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
288 .send_IPI_allbutself = uv_send_IPI_allbutself,
289 .send_IPI_all = uv_send_IPI_all,
290 .send_IPI_self = uv_send_IPI_self,
291
292 .wakeup_secondary_cpu = uv_wakeup_secondary,
293 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
294 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
295 .wait_for_init_deassert = NULL,
296 .smp_callin_clear_local_apic = NULL,
297 .inquire_remote_apic = NULL,
298
299 .read = native_apic_msr_read,
300 .write = native_apic_msr_write,
301 .icr_read = native_x2apic_icr_read,
302 .icr_write = native_x2apic_icr_write,
303 .wait_icr_idle = native_x2apic_wait_icr_idle,
304 .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
258}; 305};
259 306
260static __cpuinit void set_x2apic_extra_bits(int pnode) 307static __cpuinit void set_x2apic_extra_bits(int pnode)
@@ -322,7 +369,7 @@ static __init void map_high(char *id, unsigned long base, int shift,
322 paddr = base << shift; 369 paddr = base << shift;
323 bytes = (1UL << shift) * (max_pnode + 1); 370 bytes = (1UL << shift) * (max_pnode + 1);
324 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, 371 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
325 paddr + bytes); 372 paddr + bytes);
326 if (map_type == map_uc) 373 if (map_type == map_uc)
327 init_extra_mapping_uc(paddr, bytes); 374 init_extra_mapping_uc(paddr, bytes);
328 else 375 else
@@ -485,7 +532,7 @@ late_initcall(uv_init_heartbeat);
485 532
486/* 533/*
487 * Called on each cpu to initialize the per_cpu UV data area. 534 * Called on each cpu to initialize the per_cpu UV data area.
488 * ZZZ hotplug not supported yet 535 * FIXME: hotplug not supported yet
489 */ 536 */
490void __cpuinit uv_cpu_init(void) 537void __cpuinit uv_cpu_init(void)
491{ 538{
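The rewritten apic_x2apic_uv_x table above illustrates the new 'struct apic' driver model: every APIC operation sits behind one pointer table, hooks a platform doesn't need are explicitly NULL, and generic code dispatches through apic->. A compilable sketch of the ops-table pattern, with invented types and names:

    #include <stddef.h>
    #include <stdio.h>

    struct ops {
            const char *name;
            int  (*probe)(void);                /* optional: may be NULL */
            void (*send_ipi)(int cpu, int vec); /* required on this path */
    };

    static void send_ipi(int cpu, int vec)
    {
            printf("IPI vec %d -> cpu %d\n", vec, cpu);
    }

    static const struct ops my_apic = {
            .name     = "sketch",
            .probe    = NULL,                   /* unused hooks stay NULL... */
            .send_ipi = send_ipi,
    };

    int main(void)
    {
            /* ...so callers NULL-check optional ops before dispatching: */
            if (my_apic.probe && my_apic.probe() <= 0)
                    return 1;
            my_apic.send_ipi(3, 0xf7);
            return 0;
    }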
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 266ec6c18b6..10033fe718e 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -301,7 +301,7 @@ extern int (*console_blank_hook)(int);
301 */ 301 */
302#define APM_ZERO_SEGS 302#define APM_ZERO_SEGS
303 303
304#include "apm.h" 304#include <asm/apm.h>
305 305
306/* 306/*
307 * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend. 307 * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend.
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index ee4df08feee..fbf2f33e308 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -75,6 +75,7 @@ void foo(void)
75 OFFSET(PT_DS, pt_regs, ds); 75 OFFSET(PT_DS, pt_regs, ds);
76 OFFSET(PT_ES, pt_regs, es); 76 OFFSET(PT_ES, pt_regs, es);
77 OFFSET(PT_FS, pt_regs, fs); 77 OFFSET(PT_FS, pt_regs, fs);
78 OFFSET(PT_GS, pt_regs, gs);
78 OFFSET(PT_ORIG_EAX, pt_regs, orig_ax); 79 OFFSET(PT_ORIG_EAX, pt_regs, orig_ax);
79 OFFSET(PT_EIP, pt_regs, ip); 80 OFFSET(PT_EIP, pt_regs, ip);
80 OFFSET(PT_CS, pt_regs, cs); 81 OFFSET(PT_CS, pt_regs, cs);
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 1d41d3f1edb..8793ab33e2c 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -11,7 +11,6 @@
11#include <linux/hardirq.h> 11#include <linux/hardirq.h>
12#include <linux/suspend.h> 12#include <linux/suspend.h>
13#include <linux/kbuild.h> 13#include <linux/kbuild.h>
14#include <asm/pda.h>
15#include <asm/processor.h> 14#include <asm/processor.h>
16#include <asm/segment.h> 15#include <asm/segment.h>
17#include <asm/thread_info.h> 16#include <asm/thread_info.h>
@@ -48,16 +47,6 @@ int main(void)
48#endif 47#endif
49 BLANK(); 48 BLANK();
50#undef ENTRY 49#undef ENTRY
51#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
52 ENTRY(kernelstack);
53 ENTRY(oldrsp);
54 ENTRY(pcurrent);
55 ENTRY(irqcount);
56 ENTRY(cpunumber);
57 ENTRY(irqstackptr);
58 ENTRY(data_offset);
59 BLANK();
60#undef ENTRY
61#ifdef CONFIG_PARAVIRT 50#ifdef CONFIG_PARAVIRT
62 BLANK(); 51 BLANK();
63 OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); 52 OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 2ac0ab71412..b617b1164f1 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -83,7 +83,7 @@ void __init setup_bios_corruption_check(void)
83 u64 size; 83 u64 size;
84 addr = find_e820_area_size(addr, &size, PAGE_SIZE); 84 addr = find_e820_area_size(addr, &size, PAGE_SIZE);
85 85
86 if (addr == 0) 86 if (!(addr + 1))
87 break; 87 break;
88 88
89 if ((addr + size) > corruption_check_size) 89 if ((addr + size) > corruption_check_size)
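The check.c change works because find_e820_area_size() signals failure with ~0UL rather than 0, and !(addr + 1) is the compact test for that sentinel: adding 1 to (u64)-1 wraps to zero. A tiny standalone demonstration:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
            uint64_t ok = 0x1000, fail = (uint64_t)-1;

            assert(!(fail + 1));  /* -1ULL + 1 wraps to 0: failure sentinel */
            assert(ok + 1);       /* any valid address passes the test      */
            return 0;
    }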
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 82db7f45e2d..d4356f8b752 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,6 +14,8 @@ obj-y += vmware.o hypervisor.o
14obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o 14obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
15obj-$(CONFIG_X86_64) += bugs_64.o 15obj-$(CONFIG_X86_64) += bugs_64.o
16 16
17obj-$(CONFIG_X86_CPU_DEBUG) += cpu_debug.o
18
17obj-$(CONFIG_CPU_SUP_INTEL) += intel.o 19obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
18obj-$(CONFIG_CPU_SUP_AMD) += amd.o 20obj-$(CONFIG_CPU_SUP_AMD) += amd.o
19obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o 21obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 2cf23634b6d..8220ae69849 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,7 +7,7 @@
7#include <asm/pat.h> 7#include <asm/pat.h>
8#include <asm/processor.h> 8#include <asm/processor.h>
9 9
10#include <mach_apic.h> 10#include <asm/apic.h>
11 11
12struct cpuid_bit { 12struct cpuid_bit {
13 u16 feature; 13 u16 feature;
@@ -29,7 +29,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
29 u32 regs[4]; 29 u32 regs[4];
30 const struct cpuid_bit *cb; 30 const struct cpuid_bit *cb;
31 31
32 static const struct cpuid_bit cpuid_bits[] = { 32 static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
33 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 }, 33 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
34 { 0, 0, 0, 0 } 34 { 0, 0, 0, 0 }
35 }; 35 };
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
69 */ 69 */
70void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) 70void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
71{ 71{
72#ifdef CONFIG_X86_SMP 72#ifdef CONFIG_SMP
73 unsigned int eax, ebx, ecx, edx, sub_index; 73 unsigned int eax, ebx, ecx, edx, sub_index;
74 unsigned int ht_mask_width, core_plus_mask_width; 74 unsigned int ht_mask_width, core_plus_mask_width;
75 unsigned int core_select_mask, core_level_siblings; 75 unsigned int core_select_mask, core_level_siblings;
@@ -116,22 +116,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
116 116
117 core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; 117 core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
118 118
119#ifdef CONFIG_X86_32 119 c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
120 c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
121 & core_select_mask; 120 & core_select_mask;
122 c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width); 121 c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
123 /* 122 /*
124 * Reinit the apicid, now that we have extended initial_apicid. 123 * Reinit the apicid, now that we have extended initial_apicid.
125 */ 124 */
126 c->apicid = phys_pkg_id(c->initial_apicid, 0); 125 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
127#else 126
128 c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
129 c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
130 /*
131 * Reinit the apicid, now that we have extended initial_apicid.
132 */
133 c->apicid = phys_pkg_id(0);
134#endif
135 c->x86_max_cores = (core_level_siblings / smp_num_siblings); 127 c->x86_max_cores = (core_level_siblings / smp_num_siblings);
136 128
137 129
@@ -143,37 +135,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
143 return; 135 return;
144#endif 136#endif
145} 137}
146
147#ifdef CONFIG_X86_PAT
148void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
149{
150 if (!cpu_has_pat)
151 pat_disable("PAT not supported by CPU.");
152
153 switch (c->x86_vendor) {
154 case X86_VENDOR_INTEL:
155 /*
156 * There is a known erratum on Pentium III and Core Solo
157 * and Core Duo CPUs.
158 * " Page with PAT set to WC while associated MTRR is UC
159 * may consolidate to UC "
160 * Because of this erratum, it is better to stick with
161 * setting WC in MTRR rather than using PAT on these CPUs.
162 *
163 * Enable PAT WC only on P4, Core 2 or later CPUs.
164 */
165 if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
166 return;
167
168 pat_disable("PAT WC disabled due to known CPU erratum.");
169 return;
170
171 case X86_VENDOR_AMD:
172 case X86_VENDOR_CENTAUR:
173 case X86_VENDOR_TRANSMETA:
174 return;
175 }
176
177 pat_disable("PAT disabled. Not yet verified on this CPU type.");
178}
179#endif
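detect_extended_topology() treats the APIC ID as packed bitfields: the low ht_mask_width bits number SMT siblings, the next bits select the core, and the remainder identifies the package; phys_pkg_id() is just a right shift. A self-contained sketch with made-up field widths:

    #include <stdio.h>

    static unsigned int pkg_id(unsigned int apicid, int index_msb)
    {
            return apicid >> index_msb;     /* shift away the lower fields */
    }

    int main(void)
    {
            unsigned int apicid = 0x35;     /* 0b110101: pkg 3, core 2, thread 1 */
            int ht_width = 1, core_plus_ht_width = 4;
            unsigned int core_mask = (~(~0u << core_plus_ht_width)) >> ht_width;

            printf("thread %u core %u pkg %u\n",
                   apicid & ((1u << ht_width) - 1),
                   pkg_id(apicid, ht_width) & core_mask,
                   pkg_id(apicid, core_plus_ht_width));
            return 0;
    }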
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7c878f6aa91..7e4a459daa6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
5#include <asm/io.h> 5#include <asm/io.h>
6#include <asm/processor.h> 6#include <asm/processor.h>
7#include <asm/apic.h> 7#include <asm/apic.h>
8#include <asm/cpu.h>
8 9
9#ifdef CONFIG_X86_64 10#ifdef CONFIG_X86_64
10# include <asm/numa_64.h> 11# include <asm/numa_64.h>
@@ -12,8 +13,6 @@
12# include <asm/cacheflush.h> 13# include <asm/cacheflush.h>
13#endif 14#endif
14 15
15#include <mach_apic.h>
16
17#include "cpu.h" 16#include "cpu.h"
18 17
19#ifdef CONFIG_X86_32 18#ifdef CONFIG_X86_32
@@ -143,6 +142,55 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
143 } 142 }
144} 143}
145 144
145static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
146{
147#ifdef CONFIG_SMP
 148 /* called from identify_secondary_cpu()? */
149 if (c->cpu_index == boot_cpu_id)
150 return;
151
152 /*
153 * Certain Athlons might work (for various values of 'work') in SMP
154 * but they are not certified as MP capable.
155 */
156 /* Athlon 660/661 is valid. */
157 if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
158 (c->x86_mask == 1)))
159 goto valid_k7;
160
161 /* Duron 670 is valid */
162 if ((c->x86_model == 7) && (c->x86_mask == 0))
163 goto valid_k7;
164
165 /*
 166 * Athlon 662, Duron 671, and Athlons above model 7 have the MP
 167 * capability bit. It's worth noting that the A5 stepping (662) of
 168 * some Athlon XPs has the MP bit set.
169 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
170 * more.
171 */
172 if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
173 ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
174 (c->x86_model > 7))
175 if (cpu_has_mp)
176 goto valid_k7;
177
178 /* If we get here, not a certified SMP capable AMD system. */
179
 180 /*
 181 * Don't taint if we are running an SMP kernel on a single non-MP
 182 * approved Athlon
 183 */
 184 WARN_ONCE(1, "WARNING: This combination of AMD"
 185 " processors is not suitable for SMP.\n");
186 if (!test_taint(TAINT_UNSAFE_SMP))
187 add_taint(TAINT_UNSAFE_SMP);
188
189valid_k7:
190 ;
191#endif
192}
193
146static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) 194static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
147{ 195{
148 u32 l, h; 196 u32 l, h;
@@ -177,6 +225,8 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
177 } 225 }
178 226
179 set_cpu_cap(c, X86_FEATURE_K7); 227 set_cpu_cap(c, X86_FEATURE_K7);
228
229 amd_k7_smp_check(c);
180} 230}
181#endif 231#endif
182 232
@@ -452,7 +502,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
452} 502}
453#endif 503#endif
454 504
455static struct cpu_dev amd_cpu_dev __cpuinitdata = { 505static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
456 .c_vendor = "AMD", 506 .c_vendor = "AMD",
457 .c_ident = { "AuthenticAMD" }, 507 .c_ident = { "AuthenticAMD" },
458#ifdef CONFIG_X86_32 508#ifdef CONFIG_X86_32
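amd_k7_smp_check() above is a whitelist: known-good model/stepping pairs pass outright, later parts are trusted only if they advertise the MP capability bit, and everything else taints the kernel. The gating logic isolated into a hypothetical helper (values taken from the comments in the hunk):

    #include <stdbool.h>
    #include <stdio.h>

    static bool k7_smp_certified(int model, int stepping, bool has_mp_bit)
    {
            if (model == 6 && (stepping == 0 || stepping == 1))
                    return true;            /* Athlon 660/661 */
            if (model == 7 && stepping == 0)
                    return true;            /* Duron 670 */
            if ((model == 6 && stepping >= 2) ||
                (model == 7 && stepping >= 1) || model > 7)
                    return has_mp_bit;      /* trust the MP capability bit */
            return false;                   /* would WARN and taint */
    }

    int main(void)
    {
            printf("%d\n", k7_smp_certified(6, 2, false)); /* 0: taints */
            return 0;
    }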
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 89bfdd9cacc..983e0830f0d 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -468,7 +468,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
468 return size; 468 return size;
469} 469}
470 470
471static struct cpu_dev centaur_cpu_dev __cpuinitdata = { 471static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
472 .c_vendor = "Centaur", 472 .c_vendor = "Centaur",
473 .c_ident = { "CentaurHauls" }, 473 .c_ident = { "CentaurHauls" },
474 .c_early_init = early_init_centaur, 474 .c_early_init = early_init_centaur,
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
index a1625f5a1e7..51b09c48c9c 100644
--- a/arch/x86/kernel/cpu/centaur_64.c
+++ b/arch/x86/kernel/cpu/centaur_64.c
@@ -25,7 +25,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
25 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); 25 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
26} 26}
27 27
28static struct cpu_dev centaur_cpu_dev __cpuinitdata = { 28static const struct cpu_dev centaur_cpu_dev __cpuinitconst = {
29 .c_vendor = "Centaur", 29 .c_vendor = "Centaur",
30 .c_ident = { "CentaurHauls" }, 30 .c_ident = { "CentaurHauls" },
31 .c_early_init = early_init_centaur, 31 .c_early_init = early_init_centaur,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 83492b1f93b..e2962cc1e27 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,118 +1,129 @@
1#include <linux/init.h>
2#include <linux/kernel.h>
3#include <linux/sched.h>
4#include <linux/string.h>
5#include <linux/bootmem.h> 1#include <linux/bootmem.h>
2#include <linux/linkage.h>
6#include <linux/bitops.h> 3#include <linux/bitops.h>
4#include <linux/kernel.h>
7#include <linux/module.h> 5#include <linux/module.h>
8#include <linux/kgdb.h> 6#include <linux/percpu.h>
9#include <linux/topology.h> 7#include <linux/string.h>
10#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/sched.h>
10#include <linux/init.h>
11#include <linux/kgdb.h>
11#include <linux/smp.h> 12#include <linux/smp.h>
12#include <linux/percpu.h> 13#include <linux/io.h>
13#include <asm/i387.h> 14
14#include <asm/msr.h> 15#include <asm/stackprotector.h>
15#include <asm/io.h>
16#include <asm/linkage.h>
17#include <asm/mmu_context.h> 16#include <asm/mmu_context.h>
17#include <asm/hypervisor.h>
18#include <asm/processor.h>
19#include <asm/sections.h>
20#include <asm/topology.h>
21#include <asm/cpumask.h>
22#include <asm/pgtable.h>
23#include <asm/atomic.h>
24#include <asm/proto.h>
25#include <asm/setup.h>
26#include <asm/apic.h>
27#include <asm/desc.h>
28#include <asm/i387.h>
18#include <asm/mtrr.h> 29#include <asm/mtrr.h>
30#include <asm/numa.h>
31#include <asm/asm.h>
32#include <asm/cpu.h>
19#include <asm/mce.h> 33#include <asm/mce.h>
34#include <asm/msr.h>
20#include <asm/pat.h> 35#include <asm/pat.h>
21#include <asm/asm.h>
22#include <asm/numa.h>
23#include <asm/smp.h> 36#include <asm/smp.h>
37
24#ifdef CONFIG_X86_LOCAL_APIC 38#ifdef CONFIG_X86_LOCAL_APIC
25#include <asm/mpspec.h> 39#include <asm/uv/uv.h>
26#include <asm/apic.h>
27#include <mach_apic.h>
28#include <asm/genapic.h>
29#endif 40#endif
30 41
31#include <asm/pda.h>
32#include <asm/pgtable.h>
33#include <asm/processor.h>
34#include <asm/desc.h>
35#include <asm/atomic.h>
36#include <asm/proto.h>
37#include <asm/sections.h>
38#include <asm/setup.h>
39#include <asm/hypervisor.h>
40
41#include "cpu.h" 42#include "cpu.h"
42 43
43#ifdef CONFIG_X86_64 44#ifdef CONFIG_X86_64
44 45
45/* all of these masks are initialized in setup_cpu_local_masks() */ 46/* all of these masks are initialized in setup_cpu_local_masks() */
46cpumask_var_t cpu_callin_mask;
47cpumask_var_t cpu_callout_mask;
48cpumask_var_t cpu_initialized_mask; 47cpumask_var_t cpu_initialized_mask;
48cpumask_var_t cpu_callout_mask;
49cpumask_var_t cpu_callin_mask;
49 50
50/* representing cpus for which sibling maps can be computed */ 51/* representing cpus for which sibling maps can be computed */
51cpumask_var_t cpu_sibling_setup_mask; 52cpumask_var_t cpu_sibling_setup_mask;
52 53
54/* correctly size the local cpu masks */
55void __init setup_cpu_local_masks(void)
56{
57 alloc_bootmem_cpumask_var(&cpu_initialized_mask);
58 alloc_bootmem_cpumask_var(&cpu_callin_mask);
59 alloc_bootmem_cpumask_var(&cpu_callout_mask);
60 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
61}
62
53#else /* CONFIG_X86_32 */ 63#else /* CONFIG_X86_32 */
54 64
55cpumask_t cpu_callin_map; 65cpumask_t cpu_sibling_setup_map;
56cpumask_t cpu_callout_map; 66cpumask_t cpu_callout_map;
57cpumask_t cpu_initialized; 67cpumask_t cpu_initialized;
58cpumask_t cpu_sibling_setup_map; 68cpumask_t cpu_callin_map;
59 69
60#endif /* CONFIG_X86_32 */ 70#endif /* CONFIG_X86_32 */
61 71
62 72
63static struct cpu_dev *this_cpu __cpuinitdata; 73static const struct cpu_dev *this_cpu __cpuinitdata;
64 74
75DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
65#ifdef CONFIG_X86_64 76#ifdef CONFIG_X86_64
66/* We need valid kernel segments for data and code in long mode too 77 /*
67 * IRET will check the segment types kkeil 2000/10/28 78 * We need valid kernel segments for data and code in long mode too
68 * Also sysret mandates a special GDT layout 79 * IRET will check the segment types kkeil 2000/10/28
69 */ 80 * Also sysret mandates a special GDT layout
70/* The TLS descriptors are currently at a different place compared to i386. 81 *
71 Hopefully nobody expects them at a fixed place (Wine?) */ 82 * TLS descriptors are currently at a different place compared to i386.
72DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { 83 * Hopefully nobody expects them at a fixed place (Wine?)
73 [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, 84 */
74 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, 85 [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
75 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, 86 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
76 [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, 87 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
77 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, 88 [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
78 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, 89 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
79} }; 90 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
80#else 91#else
81DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { 92 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
82 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, 93 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
83 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, 94 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
84 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, 95 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
85 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
86 /* 96 /*
87 * Segments used for calling PnP BIOS have byte granularity. 97 * Segments used for calling PnP BIOS have byte granularity.
 88 * The code segments and data segments have fixed 64k limits, 98
89 * the transfer segment sizes are set at run time. 99 * the transfer segment sizes are set at run time.
90 */ 100 */
91 /* 32-bit code */ 101 /* 32-bit code */
92 [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } }, 102 [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
93 /* 16-bit code */ 103 /* 16-bit code */
94 [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } }, 104 [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
95 /* 16-bit data */ 105 /* 16-bit data */
96 [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } }, 106 [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
97 /* 16-bit data */ 107 /* 16-bit data */
98 [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } }, 108 [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
99 /* 16-bit data */ 109 /* 16-bit data */
100 [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } }, 110 [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
101 /* 111 /*
102 * The APM segments have byte granularity and their bases 112 * The APM segments have byte granularity and their bases
103 * are set at run time. All have 64k limits. 113 * are set at run time. All have 64k limits.
104 */ 114 */
105 /* 32-bit code */ 115 /* 32-bit code */
106 [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } }, 116 [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
107 /* 16-bit code */ 117 /* 16-bit code */
108 [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } }, 118 [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
109 /* data */ 119 /* data */
110 [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } }, 120 [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
111 121
112 [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, 122 [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
113 [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, 123 [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
114} }; 124 GDT_STACK_CANARY_INIT
115#endif 125#endif
126} };
116EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); 127EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
117 128
118#ifdef CONFIG_X86_32 129#ifdef CONFIG_X86_32
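Each gdt_page initializer pair above is the two 32-bit words of a legacy segment descriptor. A standalone decoder for one such pair, here the 32-bit kernel code segment { 0x0000ffff, 0x00cf9a00 }:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t a = 0x0000ffff, b = 0x00cf9a00;

            uint32_t base  = (a >> 16) | ((b & 0xff) << 16) | (b & 0xff000000);
            uint32_t limit = (a & 0xffff) | (b & 0x000f0000);
            unsigned access = (b >> 8) & 0xff; /* 0x9a: present, ring 0, code */
            unsigned flags  = (b >> 20) & 0xf; /* 0xc: 4K granularity, 32-bit */

            /* prints: base=0 limit=0xfffff access=0x9a flags=0xc */
            printf("base=%#x limit=%#x access=%#x flags=%#x\n",
                   base, limit, access, flags);
            return 0;
    }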
@@ -153,16 +164,17 @@ static inline int flag_is_changeable_p(u32 flag)
153 * the CPUID. Add "volatile" to not allow gcc to 164 * the CPUID. Add "volatile" to not allow gcc to
154 * optimize the subsequent calls to this function. 165 * optimize the subsequent calls to this function.
155 */ 166 */
156 asm volatile ("pushfl\n\t" 167 asm volatile ("pushfl \n\t"
157 "pushfl\n\t" 168 "pushfl \n\t"
158 "popl %0\n\t" 169 "popl %0 \n\t"
159 "movl %0,%1\n\t" 170 "movl %0, %1 \n\t"
160 "xorl %2,%0\n\t" 171 "xorl %2, %0 \n\t"
161 "pushl %0\n\t" 172 "pushl %0 \n\t"
162 "popfl\n\t" 173 "popfl \n\t"
163 "pushfl\n\t" 174 "pushfl \n\t"
164 "popl %0\n\t" 175 "popl %0 \n\t"
165 "popfl\n\t" 176 "popfl \n\t"
177
166 : "=&r" (f1), "=&r" (f2) 178 : "=&r" (f1), "=&r" (f2)
167 : "ir" (flag)); 179 : "ir" (flag));
168 180
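The reflowed asm above probes whether the EFLAGS ID bit can be toggled, which on i386 is how the presence of CPUID is detected. A userspace version of the same probe, mirroring the kernel asm (compile with -m32; on x86-64 CPUID always exists, so the test is moot):

    /* i386 only: returns nonzero if the EFLAGS ID bit is writable */
    static int id_flag_changeable(void)
    {
            unsigned int f1, f2;

            asm volatile ("pushfl \n\t"
                          "pushfl \n\t"
                          "popl %0 \n\t"
                          "movl %0, %1 \n\t"
                          "xorl %2, %0 \n\t"
                          "pushl %0 \n\t"
                          "popfl \n\t"
                          "pushfl \n\t"
                          "popl %0 \n\t"
                          "popfl \n\t"
                          : "=&r" (f1), "=&r" (f2)
                          : "ir" (0x00200000));  /* X86_EFLAGS_ID */

            return ((f1 ^ f2) & 0x00200000) != 0;
    }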
@@ -177,18 +189,22 @@ static int __cpuinit have_cpuid_p(void)
177 189
178static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 190static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
179{ 191{
180 if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) { 192 unsigned long lo, hi;
181 /* Disable processor serial number */ 193
182 unsigned long lo, hi; 194 if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
183 rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); 195 return;
184 lo |= 0x200000; 196
185 wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); 197 /* Disable processor serial number: */
186 printk(KERN_NOTICE "CPU serial number disabled.\n"); 198
187 clear_cpu_cap(c, X86_FEATURE_PN); 199 rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
188 200 lo |= 0x200000;
189 /* Disabling the serial number may affect the cpuid level */ 201 wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
190 c->cpuid_level = cpuid_eax(0); 202
191 } 203 printk(KERN_NOTICE "CPU serial number disabled.\n");
204 clear_cpu_cap(c, X86_FEATURE_PN);
205
206 /* Disabling the serial number may affect the cpuid level */
207 c->cpuid_level = cpuid_eax(0);
192} 208}
193 209
194static int __init x86_serial_nr_setup(char *s) 210static int __init x86_serial_nr_setup(char *s)
@@ -213,16 +229,64 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
213#endif 229#endif
214 230
215/* 231/*
232 * Some CPU features depend on higher CPUID levels, which may not always
233 * be available due to CPUID level capping or broken virtualization
234 * software. Add those features to this table to auto-disable them.
235 */
236struct cpuid_dependent_feature {
237 u32 feature;
238 u32 level;
239};
240
241static const struct cpuid_dependent_feature __cpuinitconst
242cpuid_dependent_features[] = {
243 { X86_FEATURE_MWAIT, 0x00000005 },
244 { X86_FEATURE_DCA, 0x00000009 },
245 { X86_FEATURE_XSAVE, 0x0000000d },
246 { 0, 0 }
247};
248
249static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
250{
251 const struct cpuid_dependent_feature *df;
252
253 for (df = cpuid_dependent_features; df->feature; df++) {
254
255 if (!cpu_has(c, df->feature))
256 continue;
257 /*
258 * Note: cpuid_level is set to -1 if unavailable, but
 259 * extended_cpuid_level is set to 0 if unavailable
260 * and the legitimate extended levels are all negative
261 * when signed; hence the weird messing around with
262 * signs here...
263 */
264 if (!((s32)df->level < 0 ?
265 (u32)df->level > (u32)c->extended_cpuid_level :
266 (s32)df->level > (s32)c->cpuid_level))
267 continue;
268
269 clear_cpu_cap(c, df->feature);
270 if (!warn)
271 continue;
272
273 printk(KERN_WARNING
274 "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
275 x86_cap_flags[df->feature], df->level);
276 }
277}
278
279/*
216 * Naming convention should be: <Name> [(<Codename>)] 280 * Naming convention should be: <Name> [(<Codename>)]
 217 * This table is only used if init_<vendor>() below doesn't set it; 281
218 * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used 282 * in particular, if CPUID levels 0x80000002..4 are supported, this
219 * 283 * isn't used
220 */ 284 */
221 285
222/* Look up CPU names by table lookup. */ 286/* Look up CPU names by table lookup. */
223static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) 287static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
224{ 288{
225 struct cpu_model_info *info; 289 const struct cpu_model_info *info;
226 290
227 if (c->x86_model >= 16) 291 if (c->x86_model >= 16)
228 return NULL; /* Range check */ 292 return NULL; /* Range check */
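The sign trick in filter_cpuid_features() deserves a worked example: basic leaves compare signed because cpuid_level is -1 when CPUID is absent, while 0x8000xxxx extended leaves are negative as s32 and must compare unsigned. The level check isolated into a standalone program:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool level_missing(uint32_t level, int32_t cpuid_level,
                              uint32_t extended_cpuid_level)
    {
            return (int32_t)level < 0
                    ? level > extended_cpuid_level   /* extended: unsigned */
                    : (int32_t)level > cpuid_level;  /* basic: signed      */
    }

    int main(void)
    {
            /* XSAVE needs basic leaf 0xd; missing if cpuid_level is 0xa: */
            printf("%d\n", level_missing(0x0000000d, 0xa, 0x80000008));
            /* An extended leaf, checked against extended_cpuid_level:    */
            printf("%d\n", level_missing(0x80000008, 0xd, 0x80000004));
            return 0;
    }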
@@ -242,21 +306,34 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
242 306
243__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; 307__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
244 308
245/* Current gdt points %fs at the "master" per-cpu area: after this, 309void load_percpu_segment(int cpu)
246 * it's on the real one. */ 310{
247void switch_to_new_gdt(void) 311#ifdef CONFIG_X86_32
312 loadsegment(fs, __KERNEL_PERCPU);
313#else
314 loadsegment(gs, 0);
315 wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
316#endif
317 load_stack_canary_segment();
318}
319
320/*
321 * Current gdt points %fs at the "master" per-cpu area: after this,
322 * it's on the real one.
323 */
324void switch_to_new_gdt(int cpu)
248{ 325{
249 struct desc_ptr gdt_descr; 326 struct desc_ptr gdt_descr;
250 327
251 gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); 328 gdt_descr.address = (long)get_cpu_gdt_table(cpu);
252 gdt_descr.size = GDT_SIZE - 1; 329 gdt_descr.size = GDT_SIZE - 1;
253 load_gdt(&gdt_descr); 330 load_gdt(&gdt_descr);
254#ifdef CONFIG_X86_32 331 /* Reload the per-cpu base */
255 asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); 332
256#endif 333 load_percpu_segment(cpu);
257} 334}
258 335
259static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; 336static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
260 337
261static void __cpuinit default_init(struct cpuinfo_x86 *c) 338static void __cpuinit default_init(struct cpuinfo_x86 *c)
262{ 339{
@@ -275,7 +352,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
275#endif 352#endif
276} 353}
277 354
278static struct cpu_dev __cpuinitdata default_cpu = { 355static const struct cpu_dev __cpuinitconst default_cpu = {
279 .c_init = default_init, 356 .c_init = default_init,
280 .c_vendor = "Unknown", 357 .c_vendor = "Unknown",
281 .c_x86_vendor = X86_VENDOR_UNKNOWN, 358 .c_x86_vendor = X86_VENDOR_UNKNOWN,
@@ -289,22 +366,24 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
289 if (c->extended_cpuid_level < 0x80000004) 366 if (c->extended_cpuid_level < 0x80000004)
290 return; 367 return;
291 368
292 v = (unsigned int *) c->x86_model_id; 369 v = (unsigned int *)c->x86_model_id;
293 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); 370 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
294 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); 371 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
295 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); 372 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
296 c->x86_model_id[48] = 0; 373 c->x86_model_id[48] = 0;
297 374
298 /* Intel chips right-justify this string for some dumb reason; 375 /*
299 undo that brain damage */ 376 * Intel chips right-justify this string for some dumb reason;
377 * undo that brain damage:
378 */
300 p = q = &c->x86_model_id[0]; 379 p = q = &c->x86_model_id[0];
301 while (*p == ' ') 380 while (*p == ' ')
302 p++; 381 p++;
303 if (p != q) { 382 if (p != q) {
304 while (*p) 383 while (*p)
305 *q++ = *p++; 384 *q++ = *p++;
306 while (q <= &c->x86_model_id[48]) 385 while (q <= &c->x86_model_id[48])
307 *q++ = '\0'; /* Zero-pad the rest */ 386 *q++ = '\0'; /* Zero-pad the rest */
308 } 387 }
309} 388}
310 389
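get_model_name() left-justifies the brand string in place by copying over the leading spaces. A standalone sketch of the same trim (the kernel version additionally zero-pads the tail out to 48 bytes):

    #include <stdio.h>

    static void left_justify(char *s)
    {
            char *p = s, *q = s;

            while (*p == ' ')
                    p++;
            if (p != q) {
                    while (*p)
                            *q++ = *p++;
                    *q = '\0';
            }
    }

    int main(void)
    {
            char id[] = "      Intel(R) Xeon(R) CPU";

            left_justify(id);
            printf("'%s'\n", id);   /* 'Intel(R) Xeon(R) CPU' */
            return 0;
    }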
@@ -373,36 +452,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
373 452
374 if (smp_num_siblings == 1) { 453 if (smp_num_siblings == 1) {
375 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); 454 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
376 } else if (smp_num_siblings > 1) { 455 goto out;
456 }
377 457
378 if (smp_num_siblings > nr_cpu_ids) { 458 if (smp_num_siblings <= 1)
379 printk(KERN_WARNING "CPU: Unsupported number of siblings %d", 459 goto out;
380 smp_num_siblings);
381 smp_num_siblings = 1;
382 return;
383 }
384 460
385 index_msb = get_count_order(smp_num_siblings); 461 if (smp_num_siblings > nr_cpu_ids) {
386#ifdef CONFIG_X86_64 462 pr_warning("CPU: Unsupported number of siblings %d",
387 c->phys_proc_id = phys_pkg_id(index_msb); 463 smp_num_siblings);
388#else 464 smp_num_siblings = 1;
389 c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); 465 return;
390#endif 466 }
391 467
392 smp_num_siblings = smp_num_siblings / c->x86_max_cores; 468 index_msb = get_count_order(smp_num_siblings);
469 c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
393 470
394 index_msb = get_count_order(smp_num_siblings); 471 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
395 472
396 core_bits = get_count_order(c->x86_max_cores); 473 index_msb = get_count_order(smp_num_siblings);
397 474
398#ifdef CONFIG_X86_64 475 core_bits = get_count_order(c->x86_max_cores);
399 c->cpu_core_id = phys_pkg_id(index_msb) & 476
400 ((1 << core_bits) - 1); 477 c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
401#else 478 ((1 << core_bits) - 1);
402 c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
403 ((1 << core_bits) - 1);
404#endif
405 }
406 479
407out: 480out:
408 if ((c->x86_max_cores * smp_num_siblings) > 1) { 481 if ((c->x86_max_cores * smp_num_siblings) > 1) {
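detect_ht() leans on get_count_order(n), which is ceil(log2(n)): the number of APIC-ID bits needed to enumerate n siblings or cores. A portable stand-in for illustration:

    #include <stdio.h>

    static int count_order(unsigned int n)
    {
            int order = 0;

            while ((1u << order) < n)
                    order++;
            return order;
    }

    int main(void)
    {
            /* prints: 0 1 3 */
            printf("%d %d %d\n", count_order(1), count_order(2), count_order(6));
            return 0;
    }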
@@ -417,8 +490,8 @@ out:
417static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) 490static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
418{ 491{
419 char *v = c->x86_vendor_id; 492 char *v = c->x86_vendor_id;
420 int i;
421 static int printed; 493 static int printed;
494 int i;
422 495
423 for (i = 0; i < X86_VENDOR_NUM; i++) { 496 for (i = 0; i < X86_VENDOR_NUM; i++) {
424 if (!cpu_devs[i]) 497 if (!cpu_devs[i])
@@ -427,6 +500,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
427 if (!strcmp(v, cpu_devs[i]->c_ident[0]) || 500 if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
428 (cpu_devs[i]->c_ident[1] && 501 (cpu_devs[i]->c_ident[1] &&
429 !strcmp(v, cpu_devs[i]->c_ident[1]))) { 502 !strcmp(v, cpu_devs[i]->c_ident[1]))) {
503
430 this_cpu = cpu_devs[i]; 504 this_cpu = cpu_devs[i];
431 c->x86_vendor = this_cpu->c_x86_vendor; 505 c->x86_vendor = this_cpu->c_x86_vendor;
432 return; 506 return;
@@ -435,7 +509,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
435 509
436 if (!printed) { 510 if (!printed) {
437 printed++; 511 printed++;
438 printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v); 512 printk(KERN_ERR
513 "CPU: vendor_id '%s' unknown, using generic init.\n", v);
514
439 printk(KERN_ERR "CPU: Your system may be unstable.\n"); 515 printk(KERN_ERR "CPU: Your system may be unstable.\n");
440 } 516 }
441 517
@@ -455,14 +531,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
455 /* Intel-defined flags: level 0x00000001 */ 531 /* Intel-defined flags: level 0x00000001 */
456 if (c->cpuid_level >= 0x00000001) { 532 if (c->cpuid_level >= 0x00000001) {
457 u32 junk, tfms, cap0, misc; 533 u32 junk, tfms, cap0, misc;
534
458 cpuid(0x00000001, &tfms, &misc, &junk, &cap0); 535 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
459 c->x86 = (tfms >> 8) & 0xf; 536 c->x86 = (tfms >> 8) & 0xf;
460 c->x86_model = (tfms >> 4) & 0xf; 537 c->x86_model = (tfms >> 4) & 0xf;
461 c->x86_mask = tfms & 0xf; 538 c->x86_mask = tfms & 0xf;
539
462 if (c->x86 == 0xf) 540 if (c->x86 == 0xf)
463 c->x86 += (tfms >> 20) & 0xff; 541 c->x86 += (tfms >> 20) & 0xff;
464 if (c->x86 >= 0x6) 542 if (c->x86 >= 0x6)
465 c->x86_model += ((tfms >> 16) & 0xf) << 4; 543 c->x86_model += ((tfms >> 16) & 0xf) << 4;
544
466 if (cap0 & (1<<19)) { 545 if (cap0 & (1<<19)) {
467 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; 546 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
468 c->x86_cache_alignment = c->x86_clflush_size; 547 c->x86_cache_alignment = c->x86_clflush_size;
@@ -478,6 +557,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
478 /* Intel-defined flags: level 0x00000001 */ 557 /* Intel-defined flags: level 0x00000001 */
479 if (c->cpuid_level >= 0x00000001) { 558 if (c->cpuid_level >= 0x00000001) {
480 u32 capability, excap; 559 u32 capability, excap;
560
481 cpuid(0x00000001, &tfms, &ebx, &excap, &capability); 561 cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
482 c->x86_capability[0] = capability; 562 c->x86_capability[0] = capability;
483 c->x86_capability[4] = excap; 563 c->x86_capability[4] = excap;
@@ -486,6 +566,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
486 /* AMD-defined flags: level 0x80000001 */ 566 /* AMD-defined flags: level 0x80000001 */
487 xlvl = cpuid_eax(0x80000000); 567 xlvl = cpuid_eax(0x80000000);
488 c->extended_cpuid_level = xlvl; 568 c->extended_cpuid_level = xlvl;
569
489 if ((xlvl & 0xffff0000) == 0x80000000) { 570 if ((xlvl & 0xffff0000) == 0x80000000) {
490 if (xlvl >= 0x80000001) { 571 if (xlvl >= 0x80000001) {
491 c->x86_capability[1] = cpuid_edx(0x80000001); 572 c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -493,13 +574,15 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
493 } 574 }
494 } 575 }
495 576
496#ifdef CONFIG_X86_64
497 if (c->extended_cpuid_level >= 0x80000008) { 577 if (c->extended_cpuid_level >= 0x80000008) {
498 u32 eax = cpuid_eax(0x80000008); 578 u32 eax = cpuid_eax(0x80000008);
499 579
500 c->x86_virt_bits = (eax >> 8) & 0xff; 580 c->x86_virt_bits = (eax >> 8) & 0xff;
501 c->x86_phys_bits = eax & 0xff; 581 c->x86_phys_bits = eax & 0xff;
502 } 582 }
583#ifdef CONFIG_X86_32
584 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
585 c->x86_phys_bits = 36;
503#endif 586#endif
504 587
505 if (c->extended_cpuid_level >= 0x80000007) 588 if (c->extended_cpuid_level >= 0x80000007)
@@ -546,8 +629,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
546{ 629{
547#ifdef CONFIG_X86_64 630#ifdef CONFIG_X86_64
548 c->x86_clflush_size = 64; 631 c->x86_clflush_size = 64;
632 c->x86_phys_bits = 36;
633 c->x86_virt_bits = 48;
549#else 634#else
550 c->x86_clflush_size = 32; 635 c->x86_clflush_size = 32;
636 c->x86_phys_bits = 32;
637 c->x86_virt_bits = 32;
551#endif 638#endif
552 c->x86_cache_alignment = c->x86_clflush_size; 639 c->x86_cache_alignment = c->x86_clflush_size;
553 640
@@ -570,21 +657,20 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
570 if (this_cpu->c_early_init) 657 if (this_cpu->c_early_init)
571 this_cpu->c_early_init(c); 658 this_cpu->c_early_init(c);
572 659
573 validate_pat_support(c);
574
575#ifdef CONFIG_SMP 660#ifdef CONFIG_SMP
576 c->cpu_index = boot_cpu_id; 661 c->cpu_index = boot_cpu_id;
577#endif 662#endif
663 filter_cpuid_features(c, false);
578} 664}
579 665
580void __init early_cpu_init(void) 666void __init early_cpu_init(void)
581{ 667{
582 struct cpu_dev **cdev; 668 const struct cpu_dev *const *cdev;
583 int count = 0; 669 int count = 0;
584 670
585 printk("KERNEL supported cpus:\n"); 671 printk(KERN_INFO "KERNEL supported cpus:\n");
586 for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { 672 for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
587 struct cpu_dev *cpudev = *cdev; 673 const struct cpu_dev *cpudev = *cdev;
588 unsigned int j; 674 unsigned int j;
589 675
590 if (count >= X86_VENDOR_NUM) 676 if (count >= X86_VENDOR_NUM)
@@ -595,7 +681,7 @@ void __init early_cpu_init(void)
595 for (j = 0; j < 2; j++) { 681 for (j = 0; j < 2; j++) {
596 if (!cpudev->c_ident[j]) 682 if (!cpudev->c_ident[j])
597 continue; 683 continue;
598 printk(" %s %s\n", cpudev->c_vendor, 684 printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
599 cpudev->c_ident[j]); 685 cpudev->c_ident[j]);
600 } 686 }
601 } 687 }
@@ -637,7 +723,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
637 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; 723 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
638#ifdef CONFIG_X86_32 724#ifdef CONFIG_X86_32
639# ifdef CONFIG_X86_HT 725# ifdef CONFIG_X86_HT
640 c->apicid = phys_pkg_id(c->initial_apicid, 0); 726 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
641# else 727# else
642 c->apicid = c->initial_apicid; 728 c->apicid = c->initial_apicid;
643# endif 729# endif
@@ -671,9 +757,13 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
671 c->x86_coreid_bits = 0; 757 c->x86_coreid_bits = 0;
672#ifdef CONFIG_X86_64 758#ifdef CONFIG_X86_64
673 c->x86_clflush_size = 64; 759 c->x86_clflush_size = 64;
760 c->x86_phys_bits = 36;
761 c->x86_virt_bits = 48;
674#else 762#else
675 c->cpuid_level = -1; /* CPUID not detected */ 763 c->cpuid_level = -1; /* CPUID not detected */
676 c->x86_clflush_size = 32; 764 c->x86_clflush_size = 32;
765 c->x86_phys_bits = 32;
766 c->x86_virt_bits = 32;
677#endif 767#endif
678 c->x86_cache_alignment = c->x86_clflush_size; 768 c->x86_cache_alignment = c->x86_clflush_size;
679 memset(&c->x86_capability, 0, sizeof c->x86_capability); 769 memset(&c->x86_capability, 0, sizeof c->x86_capability);
@@ -684,7 +774,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
684 this_cpu->c_identify(c); 774 this_cpu->c_identify(c);
685 775
686#ifdef CONFIG_X86_64 776#ifdef CONFIG_X86_64
687 c->apicid = phys_pkg_id(0); 777 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
688#endif 778#endif
689 779
690 /* 780 /*
@@ -704,13 +794,16 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
704 squash_the_stupid_serial_number(c); 794 squash_the_stupid_serial_number(c);
705 795
706 /* 796 /*
707 * The vendor-specific functions might have changed features. Now 797 * The vendor-specific functions might have changed features.
708 * we do "generic changes." 798 * Now we do "generic changes."
709 */ 799 */
710 800
801 /* Filter out anything that depends on CPUID levels we don't have */
802 filter_cpuid_features(c, true);
803
711 /* If the model name is still unset, do table lookup. */ 804 /* If the model name is still unset, do table lookup. */
712 if (!c->x86_model_id[0]) { 805 if (!c->x86_model_id[0]) {
713 char *p; 806 const char *p;
714 p = table_lookup_model(c); 807 p = table_lookup_model(c);
715 if (p) 808 if (p)
716 strcpy(c->x86_model_id, p); 809 strcpy(c->x86_model_id, p);
@@ -785,11 +878,11 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
785} 878}
786 879
787struct msr_range { 880struct msr_range {
788 unsigned min; 881 unsigned min;
789 unsigned max; 882 unsigned max;
790}; 883};
791 884
792static struct msr_range msr_range_array[] __cpuinitdata = { 885static const struct msr_range msr_range_array[] __cpuinitconst = {
793 { 0x00000000, 0x00000418}, 886 { 0x00000000, 0x00000418},
794 { 0xc0000000, 0xc000040b}, 887 { 0xc0000000, 0xc000040b},
795 { 0xc0010000, 0xc0010142}, 888 { 0xc0010000, 0xc0010142},
@@ -798,14 +891,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
798 891
799static void __cpuinit print_cpu_msr(void) 892static void __cpuinit print_cpu_msr(void)
800{ 893{
894 unsigned index_min, index_max;
801 unsigned index; 895 unsigned index;
802 u64 val; 896 u64 val;
803 int i; 897 int i;
804 unsigned index_min, index_max;
805 898
806 for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { 899 for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
807 index_min = msr_range_array[i].min; 900 index_min = msr_range_array[i].min;
808 index_max = msr_range_array[i].max; 901 index_max = msr_range_array[i].max;
902
809 for (index = index_min; index < index_max; index++) { 903 for (index = index_min; index < index_max; index++) {
810 if (rdmsrl_amd_safe(index, &val)) 904 if (rdmsrl_amd_safe(index, &val))
811 continue; 905 continue;
@@ -815,6 +909,7 @@ static void __cpuinit print_cpu_msr(void)
815} 909}
816 910
817static int show_msr __cpuinitdata; 911static int show_msr __cpuinitdata;
912
818static __init int setup_show_msr(char *arg) 913static __init int setup_show_msr(char *arg)
819{ 914{
820 int num; 915 int num;
@@ -836,12 +931,14 @@ __setup("noclflush", setup_noclflush);
836 931
837void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) 932void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
838{ 933{
839 char *vendor = NULL; 934 const char *vendor = NULL;
840 935
841 if (c->x86_vendor < X86_VENDOR_NUM) 936 if (c->x86_vendor < X86_VENDOR_NUM) {
842 vendor = this_cpu->c_vendor; 937 vendor = this_cpu->c_vendor;
843 else if (c->cpuid_level >= 0) 938 } else {
844 vendor = c->x86_vendor_id; 939 if (c->cpuid_level >= 0)
940 vendor = c->x86_vendor_id;
941 }
845 942
846 if (vendor && !strstr(c->x86_model_id, vendor)) 943 if (vendor && !strstr(c->x86_model_id, vendor))
847 printk(KERN_CONT "%s ", vendor); 944 printk(KERN_CONT "%s ", vendor);
@@ -868,65 +965,45 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
868static __init int setup_disablecpuid(char *arg) 965static __init int setup_disablecpuid(char *arg)
869{ 966{
870 int bit; 967 int bit;
968
871 if (get_option(&arg, &bit) && bit < NCAPINTS*32) 969 if (get_option(&arg, &bit) && bit < NCAPINTS*32)
872 setup_clear_cpu_cap(bit); 970 setup_clear_cpu_cap(bit);
873 else 971 else
874 return 0; 972 return 0;
973
875 return 1; 974 return 1;
876} 975}
877__setup("clearcpuid=", setup_disablecpuid); 976__setup("clearcpuid=", setup_disablecpuid);
878 977
879#ifdef CONFIG_X86_64 978#ifdef CONFIG_X86_64
880struct x8664_pda **_cpu_pda __read_mostly;
881EXPORT_SYMBOL(_cpu_pda);
882
883struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; 979struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
884 980
885static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; 981DEFINE_PER_CPU_FIRST(union irq_stack_union,
982 irq_stack_union) __aligned(PAGE_SIZE);
886 983
887void __cpuinit pda_init(int cpu) 984DEFINE_PER_CPU(char *, irq_stack_ptr) =
888{ 985 init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
889 struct x8664_pda *pda = cpu_pda(cpu);
890 986
891 /* Setup up data that may be needed in __get_free_pages early */ 987DEFINE_PER_CPU(unsigned long, kernel_stack) =
892 loadsegment(fs, 0); 988 (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
893 loadsegment(gs, 0); 989EXPORT_PER_CPU_SYMBOL(kernel_stack);
894 /* Memory clobbers used to order PDA accessed */
895 mb();
896 wrmsrl(MSR_GS_BASE, pda);
897 mb();
898
899 pda->cpunumber = cpu;
900 pda->irqcount = -1;
901 pda->kernelstack = (unsigned long)stack_thread_info() -
902 PDA_STACKOFFSET + THREAD_SIZE;
903 pda->active_mm = &init_mm;
904 pda->mmu_state = 0;
905
906 if (cpu == 0) {
907 /* others are initialized in smpboot.c */
908 pda->pcurrent = &init_task;
909 pda->irqstackptr = boot_cpu_stack;
910 pda->irqstackptr += IRQSTACKSIZE - 64;
911 } else {
912 if (!pda->irqstackptr) {
913 pda->irqstackptr = (char *)
914 __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
915 if (!pda->irqstackptr)
916 panic("cannot allocate irqstack for cpu %d",
917 cpu);
918 pda->irqstackptr += IRQSTACKSIZE - 64;
919 }
920 990
921 if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) 991DEFINE_PER_CPU(unsigned int, irq_count) = -1;
922 pda->nodenumber = cpu_to_node(cpu);
923 }
924}
925 992
926static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + 993/*
927 DEBUG_STKSZ] __page_aligned_bss; 994 * Special IST stacks which the CPU switches to when it calls
995 * an IST-marked descriptor entry. Up to 7 stacks (hardware
996 * limit), all of them are 4K, except the debug stack which
997 * is 8K.
998 */
999static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
1000 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
1001 [DEBUG_STACK - 1] = DEBUG_STKSZ
1002};
928 1003
929extern asmlinkage void ignore_sysret(void); 1004static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
1005 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
1006 __aligned(PAGE_SIZE);
930 1007
931/* May not be marked __init: used by software suspend */ 1008/* May not be marked __init: used by software suspend */
932void syscall_init(void) 1009void syscall_init(void)
@@ -957,16 +1034,38 @@ unsigned long kernel_eflags;
957 */ 1034 */
958DEFINE_PER_CPU(struct orig_ist, orig_ist); 1035DEFINE_PER_CPU(struct orig_ist, orig_ist);
959 1036
960#else 1037#else /* CONFIG_X86_64 */
1038
1039#ifdef CONFIG_CC_STACKPROTECTOR
1040DEFINE_PER_CPU(unsigned long, stack_canary);
1041#endif
961 1042
962/* Make sure %fs is initialized properly in idle threads */ 1043/* Make sure %fs and %gs are initialized properly in idle threads */
963struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) 1044struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
964{ 1045{
965 memset(regs, 0, sizeof(struct pt_regs)); 1046 memset(regs, 0, sizeof(struct pt_regs));
966 regs->fs = __KERNEL_PERCPU; 1047 regs->fs = __KERNEL_PERCPU;
1048 regs->gs = __KERNEL_STACK_CANARY;
1049
967 return regs; 1050 return regs;
968} 1051}
969#endif 1052#endif /* CONFIG_X86_64 */
1053
1054/*
1055 * Clear all 6 debug registers:
1056 */
1057static void clear_all_debug_regs(void)
1058{
1059 int i;
1060
1061 for (i = 0; i < 8; i++) {
1062 /* Ignore db4, db5 */
1063 if ((i == 4) || (i == 5))
1064 continue;
1065
1066 set_debugreg(0, i);
1067 }
1068}
970 1069
971/* 1070/*
972 * cpu_init() initializes state that is per-CPU. Some data is already 1071 * cpu_init() initializes state that is per-CPU. Some data is already
@@ -976,21 +1075,25 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
976 * A lot of state is already set up in PDA init for 64 bit 1075 * A lot of state is already set up in PDA init for 64 bit
977 */ 1076 */
978#ifdef CONFIG_X86_64 1077#ifdef CONFIG_X86_64
1078
979void __cpuinit cpu_init(void) 1079void __cpuinit cpu_init(void)
980{ 1080{
981 int cpu = stack_smp_processor_id(); 1081 struct orig_ist *orig_ist;
982 struct tss_struct *t = &per_cpu(init_tss, cpu);
983 struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
984 unsigned long v;
985 char *estacks = NULL;
986 struct task_struct *me; 1082 struct task_struct *me;
1083 struct tss_struct *t;
1084 unsigned long v;
1085 int cpu;
987 int i; 1086 int i;
988 1087
989 /* CPU 0 is initialised in head64.c */ 1088 cpu = stack_smp_processor_id();
990 if (cpu != 0) 1089 t = &per_cpu(init_tss, cpu);
991 pda_init(cpu); 1090 orig_ist = &per_cpu(orig_ist, cpu);
992 else 1091
993 estacks = boot_exception_stacks; 1092#ifdef CONFIG_NUMA
1093 if (cpu != 0 && percpu_read(node_number) == 0 &&
1094 cpu_to_node(cpu) != NUMA_NO_NODE)
1095 percpu_write(node_number, cpu_to_node(cpu));
1096#endif
994 1097
995 me = current; 1098 me = current;
996 1099
@@ -1006,7 +1109,9 @@ void __cpuinit cpu_init(void)
1006 * and set up the GDT descriptor: 1109 * and set up the GDT descriptor:
1007 */ 1110 */
1008 1111
1009 switch_to_new_gdt(); 1112 switch_to_new_gdt(cpu);
1113 loadsegment(fs, 0);
1114
1010 load_idt((const struct desc_ptr *)&idt_descr); 1115 load_idt((const struct desc_ptr *)&idt_descr);
1011 1116
1012 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); 1117 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@@ -1017,31 +1122,24 @@ void __cpuinit cpu_init(void)
1017 barrier(); 1122 barrier();
1018 1123
1019 check_efer(); 1124 check_efer();
1020 if (cpu != 0 && x2apic) 1125 if (cpu != 0)
1021 enable_x2apic(); 1126 enable_x2apic();
1022 1127
1023 /* 1128 /*
1024 * set up and load the per-CPU TSS 1129 * set up and load the per-CPU TSS
1025 */ 1130 */
1026 if (!orig_ist->ist[0]) { 1131 if (!orig_ist->ist[0]) {
1027 static const unsigned int order[N_EXCEPTION_STACKS] = { 1132 char *estacks = per_cpu(exception_stacks, cpu);
1028 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, 1133
1029 [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
1030 };
1031 for (v = 0; v < N_EXCEPTION_STACKS; v++) { 1134 for (v = 0; v < N_EXCEPTION_STACKS; v++) {
1032 if (cpu) { 1135 estacks += exception_stack_sizes[v];
1033 estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
1034 if (!estacks)
1035 panic("Cannot allocate exception "
1036 "stack %ld %d\n", v, cpu);
1037 }
1038 estacks += PAGE_SIZE << order[v];
1039 orig_ist->ist[v] = t->x86_tss.ist[v] = 1136 orig_ist->ist[v] = t->x86_tss.ist[v] =
1040 (unsigned long)estacks; 1137 (unsigned long)estacks;
1041 } 1138 }
1042 } 1139 }
1043 1140
1044 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 1141 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1142
1045 /* 1143 /*
1046 * <= is required because the CPU will access up to 1144 * <= is required because the CPU will access up to
1047 * 8 bits beyond the end of the IO permission bitmap. 1145 * 8 bits beyond the end of the IO permission bitmap.
@@ -1051,8 +1149,7 @@ void __cpuinit cpu_init(void)
1051 1149
1052 atomic_inc(&init_mm.mm_count); 1150 atomic_inc(&init_mm.mm_count);
1053 me->active_mm = &init_mm; 1151 me->active_mm = &init_mm;
1054 if (me->mm) 1152 BUG_ON(me->mm);
1055 BUG();
1056 enter_lazy_tlb(&init_mm, me); 1153 enter_lazy_tlb(&init_mm, me);
1057 1154
1058 load_sp0(t, &current->thread); 1155 load_sp0(t, &current->thread);
@@ -1069,22 +1166,9 @@ void __cpuinit cpu_init(void)
1069 */ 1166 */
1070 if (kgdb_connected && arch_kgdb_ops.correct_hw_break) 1167 if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
1071 arch_kgdb_ops.correct_hw_break(); 1168 arch_kgdb_ops.correct_hw_break();
1072 else { 1169 else
1073#endif
1074 /*
1075 * Clear all 6 debug registers:
1076 */
1077
1078 set_debugreg(0UL, 0);
1079 set_debugreg(0UL, 1);
1080 set_debugreg(0UL, 2);
1081 set_debugreg(0UL, 3);
1082 set_debugreg(0UL, 6);
1083 set_debugreg(0UL, 7);
1084#ifdef CONFIG_KGDB
1085 /* If the kgdb is connected no debug regs should be altered. */
1086 }
1087#endif 1170#endif
1171 clear_all_debug_regs();
1088 1172
1089 fpu_init(); 1173 fpu_init();
1090 1174
@@ -1105,7 +1189,8 @@ void __cpuinit cpu_init(void)
1105 1189
1106 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { 1190 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
1107 printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); 1191 printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
1108 for (;;) local_irq_enable(); 1192 for (;;)
1193 local_irq_enable();
1109 } 1194 }
1110 1195
1111 printk(KERN_INFO "Initializing CPU#%d\n", cpu); 1196 printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1114,15 +1199,14 @@ void __cpuinit cpu_init(void)
1114 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1199 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1115 1200
1116 load_idt(&idt_descr); 1201 load_idt(&idt_descr);
1117 switch_to_new_gdt(); 1202 switch_to_new_gdt(cpu);
1118 1203
1119 /* 1204 /*
1120 * Set up and load the per-CPU TSS and LDT 1205 * Set up and load the per-CPU TSS and LDT
1121 */ 1206 */
1122 atomic_inc(&init_mm.mm_count); 1207 atomic_inc(&init_mm.mm_count);
1123 curr->active_mm = &init_mm; 1208 curr->active_mm = &init_mm;
1124 if (curr->mm) 1209 BUG_ON(curr->mm);
1125 BUG();
1126 enter_lazy_tlb(&init_mm, curr); 1210 enter_lazy_tlb(&init_mm, curr);
1127 1211
1128 load_sp0(t, thread); 1212 load_sp0(t, thread);
@@ -1135,16 +1219,7 @@ void __cpuinit cpu_init(void)
1135 __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); 1219 __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
1136#endif 1220#endif
1137 1221
1138 /* Clear %gs. */ 1222 clear_all_debug_regs();
1139 asm volatile ("mov %0, %%gs" : : "r" (0));
1140
1141 /* Clear all 6 debug registers: */
1142 set_debugreg(0, 0);
1143 set_debugreg(0, 1);
1144 set_debugreg(0, 2);
1145 set_debugreg(0, 3);
1146 set_debugreg(0, 6);
1147 set_debugreg(0, 7);
1148 1223
1149 /* 1224 /*
1150 * Force FPU initialization: 1225 * Force FPU initialization:
@@ -1164,6 +1239,4 @@ void __cpuinit cpu_init(void)
1164 1239
1165 xsave_init(); 1240 xsave_init();
1166} 1241}
1167
1168
1169#endif 1242#endif
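The exception_stacks rework in common.c carves one per-CPU char array into per-vector IST stacks by walking exception_stack_sizes; each IST entry records the top of its sub-stack, since x86 stacks grow down. A sketch of the same walk (the index constant and sizes here are illustrative, not the kernel's):

    #include <stdio.h>

    #define N_STACKS  7
    #define DEBUG_STK 4             /* stand-in for DEBUG_STACK - 1 */

    int main(void)
    {
            static char stacks[6 * 4096 + 8192];
            static const unsigned int size[N_STACKS] = {
                    4096, 4096, 4096, 4096, 8192, 4096, 4096
            };
            unsigned long ist[N_STACKS];
            char *p = stacks;
            int v;

            for (v = 0; v < N_STACKS; v++) {
                    p += size[v];                   /* advance past this stack */
                    ist[v] = (unsigned long)p;      /* IST points at its top   */
            }
            printf("debug stack top offset: %lu\n",
                   ist[DEBUG_STK] - (unsigned long)stacks);
            return 0;
    }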
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index de4094a3921..9469ecb5aeb 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -5,15 +5,15 @@
5struct cpu_model_info { 5struct cpu_model_info {
6 int vendor; 6 int vendor;
7 int family; 7 int family;
8 char *model_names[16]; 8 const char *model_names[16];
9}; 9};
10 10
11/* attempt to consolidate cpu attributes */ 11/* attempt to consolidate cpu attributes */
12struct cpu_dev { 12struct cpu_dev {
13 char * c_vendor; 13 const char * c_vendor;
14 14
15 /* some have two possibilities for cpuid string */ 15 /* some have two possibilities for cpuid string */
16 char * c_ident[2]; 16 const char * c_ident[2];
17 17
18 struct cpu_model_info c_models[4]; 18 struct cpu_model_info c_models[4];
19 19
@@ -25,11 +25,12 @@ struct cpu_dev {
25}; 25};
26 26
27#define cpu_dev_register(cpu_devX) \ 27#define cpu_dev_register(cpu_devX) \
28 static struct cpu_dev *__cpu_dev_##cpu_devX __used \ 28 static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
29 __attribute__((__section__(".x86_cpu_dev.init"))) = \ 29 __attribute__((__section__(".x86_cpu_dev.init"))) = \
30 &cpu_devX; 30 &cpu_devX;
31 31
32extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[]; 32extern const struct cpu_dev *const __x86_cpu_dev_start[],
33 *const __x86_cpu_dev_end[];
33 34
34extern void display_cacheinfo(struct cpuinfo_x86 *c); 35extern void display_cacheinfo(struct cpuinfo_x86 *c);
35 36
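cpu_dev_register() relies on the linker-section registration pattern: each vendor file drops a pointer into .x86_cpu_dev.init, and common.c walks from __x86_cpu_dev_start to __x86_cpu_dev_end. A userspace GCC/GNU-ld sketch of the same idea (ld auto-generates __start_/__stop_ symbols for sections whose names are valid C identifiers; the section and type names here are invented):

    #include <stdio.h>

    struct dev { const char *vendor; };

    static const struct dev amd   = { "AMD" };
    static const struct dev intel = { "Intel" };

    /* Registration: plant a pointer in the "devtab" section. */
    static const struct dev *const reg_amd
            __attribute__((used, section("devtab"))) = &amd;
    static const struct dev *const reg_intel
            __attribute__((used, section("devtab"))) = &intel;

    /* Linker-provided bounds of the section: */
    extern const struct dev *const __start_devtab[];
    extern const struct dev *const __stop_devtab[];

    int main(void)
    {
            const struct dev *const *p;

            for (p = __start_devtab; p < __stop_devtab; p++)
                    printf("%s\n", (*p)->vendor);
            return 0;
    }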
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
new file mode 100755
index 00000000000..21c0cf8ced1
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -0,0 +1,839 @@
1/*
2 * CPU x86 architecture debug code
3 *
4 * Copyright(C) 2009 Jaswinder Singh Rajput
5 *
 6 * For licensing details see kernel-base/COPYING
7 */
8
9#include <linux/interrupt.h>
10#include <linux/compiler.h>
11#include <linux/seq_file.h>
12#include <linux/debugfs.h>
13#include <linux/kprobes.h>
14#include <linux/uaccess.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/percpu.h>
18#include <linux/signal.h>
19#include <linux/errno.h>
20#include <linux/sched.h>
21#include <linux/types.h>
22#include <linux/init.h>
23#include <linux/slab.h>
24#include <linux/smp.h>
25
26#include <asm/cpu_debug.h>
27#include <asm/paravirt.h>
28#include <asm/system.h>
29#include <asm/traps.h>
30#include <asm/apic.h>
31#include <asm/desc.h>
32
33static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
34static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
35static DEFINE_PER_CPU(unsigned, cpu_modelflag);
36static DEFINE_PER_CPU(int, cpu_priv_count);
37static DEFINE_PER_CPU(unsigned, cpu_model);
38
39static DEFINE_MUTEX(cpu_debug_lock);
40
41static struct dentry *cpu_debugfs_dir;
42
43static struct cpu_debug_base cpu_base[] = {
44 { "mc", CPU_MC, 0 },
45 { "monitor", CPU_MONITOR, 0 },
46 { "time", CPU_TIME, 0 },
47 { "pmc", CPU_PMC, 1 },
48 { "platform", CPU_PLATFORM, 0 },
49 { "apic", CPU_APIC, 0 },
50 { "poweron", CPU_POWERON, 0 },
51 { "control", CPU_CONTROL, 0 },
52 { "features", CPU_FEATURES, 0 },
53 { "lastbranch", CPU_LBRANCH, 0 },
54 { "bios", CPU_BIOS, 0 },
55 { "freq", CPU_FREQ, 0 },
56 { "mtrr", CPU_MTRR, 0 },
57 { "perf", CPU_PERF, 0 },
58 { "cache", CPU_CACHE, 0 },
59 { "sysenter", CPU_SYSENTER, 0 },
60 { "therm", CPU_THERM, 0 },
61 { "misc", CPU_MISC, 0 },
62 { "debug", CPU_DEBUG, 0 },
63 { "pat", CPU_PAT, 0 },
64 { "vmx", CPU_VMX, 0 },
65 { "call", CPU_CALL, 0 },
66 { "base", CPU_BASE, 0 },
67 { "smm", CPU_SMM, 0 },
68 { "svm", CPU_SVM, 0 },
69 { "osvm", CPU_OSVM, 0 },
70 { "tss", CPU_TSS, 0 },
71 { "cr", CPU_CR, 0 },
72 { "dt", CPU_DT, 0 },
73 { "registers", CPU_REG_ALL, 0 },
74};
75
76static struct cpu_file_base cpu_file[] = {
77 { "index", CPU_REG_ALL, 0 },
78 { "value", CPU_REG_ALL, 1 },
79};
80
81/* Intel Registers Range */
82static struct cpu_debug_range cpu_intel_range[] = {
83 { 0x00000000, 0x00000001, CPU_MC, CPU_INTEL_ALL },
84 { 0x00000006, 0x00000007, CPU_MONITOR, CPU_CX_AT_XE },
85 { 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL },
86 { 0x00000011, 0x00000013, CPU_PMC, CPU_INTEL_PENTIUM },
87 { 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE },
88 { 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE },
89
90 { 0x0000002A, 0x0000002A, CPU_POWERON, CPU_PX_CX_AT_XE },
91 { 0x0000002B, 0x0000002B, CPU_POWERON, CPU_INTEL_XEON },
92 { 0x0000002C, 0x0000002C, CPU_FREQ, CPU_INTEL_XEON },
93 { 0x0000003A, 0x0000003A, CPU_CONTROL, CPU_CX_AT_XE },
94
95 { 0x00000040, 0x00000043, CPU_LBRANCH, CPU_PM_CX_AT_XE },
96 { 0x00000044, 0x00000047, CPU_LBRANCH, CPU_PM_CO_AT },
97 { 0x00000060, 0x00000063, CPU_LBRANCH, CPU_C2_AT },
98 { 0x00000064, 0x00000067, CPU_LBRANCH, CPU_INTEL_ATOM },
99
100 { 0x00000079, 0x00000079, CPU_BIOS, CPU_P6_CX_AT_XE },
101 { 0x00000088, 0x0000008A, CPU_CACHE, CPU_INTEL_P6 },
102 { 0x0000008B, 0x0000008B, CPU_BIOS, CPU_P6_CX_AT_XE },
103 { 0x0000009B, 0x0000009B, CPU_MONITOR, CPU_INTEL_XEON },
104
105 { 0x000000C1, 0x000000C2, CPU_PMC, CPU_P6_CX_AT },
106 { 0x000000CD, 0x000000CD, CPU_FREQ, CPU_CX_AT },
107 { 0x000000E7, 0x000000E8, CPU_PERF, CPU_CX_AT },
108 { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_P6_CX_XE },
109
110 { 0x00000116, 0x00000116, CPU_CACHE, CPU_INTEL_P6 },
111 { 0x00000118, 0x00000118, CPU_CACHE, CPU_INTEL_P6 },
112 { 0x00000119, 0x00000119, CPU_CACHE, CPU_INTEL_PX },
113 { 0x0000011A, 0x0000011B, CPU_CACHE, CPU_INTEL_P6 },
114 { 0x0000011E, 0x0000011E, CPU_CACHE, CPU_PX_CX_AT },
115
116 { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE },
117 { 0x00000179, 0x0000017A, CPU_MC, CPU_PX_CX_AT_XE },
118 { 0x0000017B, 0x0000017B, CPU_MC, CPU_P6_XE },
119 { 0x00000186, 0x00000187, CPU_PMC, CPU_P6_CX_AT },
120 { 0x00000198, 0x00000199, CPU_PERF, CPU_PM_CX_AT_XE },
121 { 0x0000019A, 0x0000019A, CPU_TIME, CPU_PM_CX_AT_XE },
122 { 0x0000019B, 0x0000019D, CPU_THERM, CPU_PM_CX_AT_XE },
123 { 0x000001A0, 0x000001A0, CPU_MISC, CPU_PM_CX_AT_XE },
124
125 { 0x000001C9, 0x000001C9, CPU_LBRANCH, CPU_PM_CX_AT },
126 { 0x000001D7, 0x000001D8, CPU_LBRANCH, CPU_INTEL_XEON },
127 { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_CX_AT_XE },
128 { 0x000001DA, 0x000001DA, CPU_LBRANCH, CPU_INTEL_XEON },
129 { 0x000001DB, 0x000001DB, CPU_LBRANCH, CPU_P6_XE },
130 { 0x000001DC, 0x000001DC, CPU_LBRANCH, CPU_INTEL_P6 },
131 { 0x000001DD, 0x000001DE, CPU_LBRANCH, CPU_PX_CX_AT_XE },
132 { 0x000001E0, 0x000001E0, CPU_LBRANCH, CPU_INTEL_P6 },
133
134 { 0x00000200, 0x0000020F, CPU_MTRR, CPU_P6_CX_XE },
135 { 0x00000250, 0x00000250, CPU_MTRR, CPU_P6_CX_XE },
136 { 0x00000258, 0x00000259, CPU_MTRR, CPU_P6_CX_XE },
137 { 0x00000268, 0x0000026F, CPU_MTRR, CPU_P6_CX_XE },
138 { 0x00000277, 0x00000277, CPU_PAT, CPU_C2_AT_XE },
139 { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_P6_CX_XE },
140
141 { 0x00000300, 0x00000308, CPU_PMC, CPU_INTEL_XEON },
142 { 0x00000309, 0x0000030B, CPU_PMC, CPU_C2_AT_XE },
143 { 0x0000030C, 0x00000311, CPU_PMC, CPU_INTEL_XEON },
144 { 0x00000345, 0x00000345, CPU_PMC, CPU_C2_AT },
145 { 0x00000360, 0x00000371, CPU_PMC, CPU_INTEL_XEON },
146 { 0x0000038D, 0x00000390, CPU_PMC, CPU_C2_AT },
147 { 0x000003A0, 0x000003BE, CPU_PMC, CPU_INTEL_XEON },
148 { 0x000003C0, 0x000003CD, CPU_PMC, CPU_INTEL_XEON },
149 { 0x000003E0, 0x000003E1, CPU_PMC, CPU_INTEL_XEON },
150 { 0x000003F0, 0x000003F0, CPU_PMC, CPU_INTEL_XEON },
151 { 0x000003F1, 0x000003F1, CPU_PMC, CPU_C2_AT_XE },
152 { 0x000003F2, 0x000003F2, CPU_PMC, CPU_INTEL_XEON },
153
154 { 0x00000400, 0x00000402, CPU_MC, CPU_PM_CX_AT_XE },
155 { 0x00000403, 0x00000403, CPU_MC, CPU_INTEL_XEON },
156 { 0x00000404, 0x00000406, CPU_MC, CPU_PM_CX_AT_XE },
157 { 0x00000407, 0x00000407, CPU_MC, CPU_INTEL_XEON },
158 { 0x00000408, 0x0000040A, CPU_MC, CPU_PM_CX_AT_XE },
159 { 0x0000040B, 0x0000040B, CPU_MC, CPU_INTEL_XEON },
160 { 0x0000040C, 0x0000040E, CPU_MC, CPU_PM_CX_XE },
161 { 0x0000040F, 0x0000040F, CPU_MC, CPU_INTEL_XEON },
162 { 0x00000410, 0x00000412, CPU_MC, CPU_PM_CX_AT_XE },
163 { 0x00000413, 0x00000417, CPU_MC, CPU_CX_AT_XE },
164 { 0x00000480, 0x0000048B, CPU_VMX, CPU_CX_AT_XE },
165
166 { 0x00000600, 0x00000600, CPU_DEBUG, CPU_PM_CX_AT_XE },
167 { 0x00000680, 0x0000068F, CPU_LBRANCH, CPU_INTEL_XEON },
168 { 0x000006C0, 0x000006CF, CPU_LBRANCH, CPU_INTEL_XEON },
169
170 { 0x000107CC, 0x000107D3, CPU_PMC, CPU_INTEL_XEON_MP },
171
172 { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON },
173 { 0xC0000081, 0xC0000082, CPU_CALL, CPU_INTEL_XEON },
174 { 0xC0000084, 0xC0000084, CPU_CALL, CPU_INTEL_XEON },
175 { 0xC0000100, 0xC0000102, CPU_BASE, CPU_INTEL_XEON },
176};
177
178/* AMD Registers Range */
179static struct cpu_debug_range cpu_amd_range[] = {
180 { 0x00000010, 0x00000010, CPU_TIME, CPU_ALL, },
181 { 0x0000001B, 0x0000001B, CPU_APIC, CPU_ALL, },
182 { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_ALL, },
183
184 { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_ALL, },
185 { 0x00000179, 0x0000017A, CPU_MC, CPU_ALL, },
186 { 0x0000017B, 0x0000017B, CPU_MC, CPU_ALL, },
187 { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_ALL, },
188 { 0x000001DB, 0x000001DE, CPU_LBRANCH, CPU_ALL, },
189
190 { 0x00000200, 0x0000020F, CPU_MTRR, CPU_ALL, },
191 { 0x00000250, 0x00000250, CPU_MTRR, CPU_ALL, },
192 { 0x00000258, 0x00000259, CPU_MTRR, CPU_ALL, },
193 { 0x00000268, 0x0000026F, CPU_MTRR, CPU_ALL, },
194 { 0x00000277, 0x00000277, CPU_PAT, CPU_ALL, },
195 { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_ALL, },
196
197 { 0x00000400, 0x00000417, CPU_MC, CPU_ALL, },
198
199 { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_ALL, },
200 { 0xC0000081, 0xC0000084, CPU_CALL, CPU_ALL, },
201 { 0xC0000100, 0xC0000102, CPU_BASE, CPU_ALL, },
202 { 0xC0000103, 0xC0000103, CPU_TIME, CPU_ALL, },
203
204 { 0xC0000408, 0xC000040A, CPU_MC, CPU_ALL, },
205
206 { 0xc0010000, 0xc0010007, CPU_PMC, CPU_ALL, },
207 { 0xc0010010, 0xc0010010, CPU_MTRR, CPU_ALL, },
208 { 0xc0010016, 0xc001001A, CPU_MTRR, CPU_ALL, },
209 { 0xc001001D, 0xc001001D, CPU_MTRR, CPU_ALL, },
210 { 0xc0010030, 0xc0010035, CPU_BIOS, CPU_ALL, },
211 { 0xc0010056, 0xc0010056, CPU_SMM, CPU_ALL, },
212 { 0xc0010061, 0xc0010063, CPU_SMM, CPU_ALL, },
213 { 0xc0010074, 0xc0010074, CPU_MC, CPU_ALL, },
214 { 0xc0010111, 0xc0010113, CPU_SMM, CPU_ALL, },
215 { 0xc0010114, 0xc0010118, CPU_SVM, CPU_ALL, },
216 { 0xc0010119, 0xc001011A, CPU_SMM, CPU_ALL, },
217 { 0xc0010140, 0xc0010141, CPU_OSVM, CPU_ALL, },
218 { 0xc0010156, 0xc0010156, CPU_SMM, CPU_ALL, },
219};
220
221
222static int get_cpu_modelflag(unsigned cpu)
223{
224 int flag;
225
226 switch (per_cpu(cpu_model, cpu)) {
227 /* Intel */
228 case 0x0501:
229 case 0x0502:
230 case 0x0504:
231 flag = CPU_INTEL_PENTIUM;
232 break;
233 case 0x0601:
234 case 0x0603:
235 case 0x0605:
236 case 0x0607:
237 case 0x0608:
238 case 0x060A:
239 case 0x060B:
240 flag = CPU_INTEL_P6;
241 break;
242 case 0x0609:
243 case 0x060D:
244 flag = CPU_INTEL_PENTIUM_M;
245 break;
246 case 0x060E:
247 flag = CPU_INTEL_CORE;
248 break;
249 case 0x060F:
250 case 0x0617:
251 flag = CPU_INTEL_CORE2;
252 break;
253 case 0x061C:
254 flag = CPU_INTEL_ATOM;
255 break;
256 case 0x0F00:
257 case 0x0F01:
258 case 0x0F02:
259 case 0x0F03:
260 case 0x0F04:
261 flag = CPU_INTEL_XEON_P4;
262 break;
263 case 0x0F06:
264 flag = CPU_INTEL_XEON_MP;
265 break;
266 default:
267 flag = CPU_NONE;
268 break;
269 }
270
271 return flag;
272}
273
274static int get_cpu_range_count(unsigned cpu)
275{
276 int index;
277
278 switch (per_cpu(cpu_model, cpu) >> 16) {
279 case X86_VENDOR_INTEL:
280 index = ARRAY_SIZE(cpu_intel_range);
281 break;
282 case X86_VENDOR_AMD:
283 index = ARRAY_SIZE(cpu_amd_range);
284 break;
285 default:
286 index = 0;
287 break;
288 }
289
290 return index;
291}
292
293static int is_typeflag_valid(unsigned cpu, unsigned flag)
294{
295 unsigned vendor, modelflag;
296 int i, index;
297
298 /* Standard registers should always be valid */
299 if (flag >= CPU_TSS)
300 return 1;
301
302 modelflag = per_cpu(cpu_modelflag, cpu);
303 vendor = per_cpu(cpu_model, cpu) >> 16;
304 index = get_cpu_range_count(cpu);
305
306 for (i = 0; i < index; i++) {
307 switch (vendor) {
308 case X86_VENDOR_INTEL:
309 if ((cpu_intel_range[i].model & modelflag) &&
310 (cpu_intel_range[i].flag & flag))
311 return 1;
312 break;
313 case X86_VENDOR_AMD:
314 if (cpu_amd_range[i].flag & flag)
315 return 1;
316 break;
317 }
318 }
319
320 /* Invalid */
321 return 0;
322}
323
324static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
325 int index, unsigned flag)
326{
327 unsigned modelflag;
328
329 modelflag = per_cpu(cpu_modelflag, cpu);
330 *max = 0;
331 switch (per_cpu(cpu_model, cpu) >> 16) {
332 case X86_VENDOR_INTEL:
333 if ((cpu_intel_range[index].model & modelflag) &&
334 (cpu_intel_range[index].flag & flag)) {
335 *min = cpu_intel_range[index].min;
336 *max = cpu_intel_range[index].max;
337 }
338 break;
339 case X86_VENDOR_AMD:
340 if (cpu_amd_range[index].flag & flag) {
341 *min = cpu_amd_range[index].min;
342 *max = cpu_amd_range[index].max;
343 }
344 break;
345 }
346
347 return *max;
348}
349
350/* This function can also be called with seq = NULL for printk */
351static void print_cpu_data(struct seq_file *seq, unsigned type,
352 u32 low, u32 high)
353{
354 struct cpu_private *priv;
355 u64 val = high;
356
357 if (seq) {
358 priv = seq->private;
359 if (priv->file) {
360 val = (val << 32) | low;
361 seq_printf(seq, "0x%llx\n", val);
362 } else
363 seq_printf(seq, " %08x: %08x_%08x\n",
364 type, high, low);
365 } else
366 printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
367}
368
369/* This function can also be called with seq = NULL for printk */
370static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
371{
372 unsigned msr, msr_min, msr_max;
373 struct cpu_private *priv;
374 u32 low, high;
375 int i, range;
376
377 if (seq) {
378 priv = seq->private;
379 if (priv->file) {
380 if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
381 &low, &high))
382 print_cpu_data(seq, priv->reg, low, high);
383 return;
384 }
385 }
386
387 range = get_cpu_range_count(cpu);
388
389 for (i = 0; i < range; i++) {
390 if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
391 continue;
392
393 for (msr = msr_min; msr <= msr_max; msr++) {
394 if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
395 continue;
396 print_cpu_data(seq, msr, low, high);
397 }
398 }
399}
400
401static void print_tss(void *arg)
402{
403 struct pt_regs *regs = task_pt_regs(current);
404 struct seq_file *seq = arg;
405 unsigned int seg;
406
407 seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
408 seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
409 seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
410 seq_printf(seq, " RDX\t: %016lx\n", regs->dx);
411
412 seq_printf(seq, " RSI\t: %016lx\n", regs->si);
413 seq_printf(seq, " RDI\t: %016lx\n", regs->di);
414 seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
415 seq_printf(seq, " ESP\t: %016lx\n", regs->sp);
416
417#ifdef CONFIG_X86_64
418 seq_printf(seq, " R08\t: %016lx\n", regs->r8);
419 seq_printf(seq, " R09\t: %016lx\n", regs->r9);
420 seq_printf(seq, " R10\t: %016lx\n", regs->r10);
421 seq_printf(seq, " R11\t: %016lx\n", regs->r11);
422 seq_printf(seq, " R12\t: %016lx\n", regs->r12);
423 seq_printf(seq, " R13\t: %016lx\n", regs->r13);
424 seq_printf(seq, " R14\t: %016lx\n", regs->r14);
425 seq_printf(seq, " R15\t: %016lx\n", regs->r15);
426#endif
427
428 asm("movl %%cs,%0" : "=r" (seg));
429 seq_printf(seq, " CS\t: %04x\n", seg);
430 asm("movl %%ds,%0" : "=r" (seg));
431 seq_printf(seq, " DS\t: %04x\n", seg);
432 seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
433 asm("movl %%es,%0" : "=r" (seg));
434 seq_printf(seq, " ES\t: %04x\n", seg);
435 asm("movl %%fs,%0" : "=r" (seg));
436 seq_printf(seq, " FS\t: %04x\n", seg);
437 asm("movl %%gs,%0" : "=r" (seg));
438 seq_printf(seq, " GS\t: %04x\n", seg);
439
440 seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
441
442 seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
443}
444
445static void print_cr(void *arg)
446{
447 struct seq_file *seq = arg;
448
449 seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
450 seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
451 seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
452 seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
453#ifdef CONFIG_X86_64
454 seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
455#endif
456}
457
458static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
459{
460 seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
461}
462
463static void print_dt(void *seq)
464{
465 struct desc_ptr dt;
466 unsigned long ldt;
467
468 /* IDT */
469 store_idt((struct desc_ptr *)&dt);
470 print_desc_ptr("IDT", seq, dt);
471
472 /* GDT */
473 store_gdt((struct desc_ptr *)&dt);
474 print_desc_ptr("GDT", seq, dt);
475
476 /* LDT */
477 store_ldt(ldt);
478 seq_printf(seq, " LDT\t: %016lx\n", ldt);
479
480 /* TR */
481 store_tr(ldt);
482 seq_printf(seq, " TR\t: %016lx\n", ldt);
483}
484
485static void print_dr(void *arg)
486{
487 struct seq_file *seq = arg;
488 unsigned long dr;
489 int i;
490
491 for (i = 0; i < 8; i++) {
492 /* Ignore db4, db5 */
493 if ((i == 4) || (i == 5))
494 continue;
495 get_debugreg(dr, i);
496 seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
497 }
498
499 seq_printf(seq, "\n MSR\t:\n");
500}
501
502static void print_apic(void *arg)
503{
504 struct seq_file *seq = arg;
505
506#ifdef CONFIG_X86_LOCAL_APIC
507 seq_printf(seq, " LAPIC\t:\n");
508 seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
509 seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
510 seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
511 seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
512 seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
513 seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
514 seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
515 seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
516 seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
517 seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
518 seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
519 seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
520 seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
521 seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
522 seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
523 seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
524 seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
525 seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
526 seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
527 seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
528 seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
529#endif /* CONFIG_X86_LOCAL_APIC */
530
531 seq_printf(seq, "\n MSR\t:\n");
532}
533
534static int cpu_seq_show(struct seq_file *seq, void *v)
535{
536 struct cpu_private *priv = seq->private;
537
538 if (priv == NULL)
539 return -EINVAL;
540
541 switch (cpu_base[priv->type].flag) {
542 case CPU_TSS:
543 smp_call_function_single(priv->cpu, print_tss, seq, 1);
544 break;
545 case CPU_CR:
546 smp_call_function_single(priv->cpu, print_cr, seq, 1);
547 break;
548 case CPU_DT:
549 smp_call_function_single(priv->cpu, print_dt, seq, 1);
550 break;
551 case CPU_DEBUG:
552 if (priv->file == CPU_INDEX_BIT)
553 smp_call_function_single(priv->cpu, print_dr, seq, 1);
554 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
555 break;
556 case CPU_APIC:
557 if (priv->file == CPU_INDEX_BIT)
558 smp_call_function_single(priv->cpu, print_apic, seq, 1);
559 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
560 break;
561
562 default:
563 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
564 break;
565 }
566 seq_printf(seq, "\n");
567
568 return 0;
569}
570
571static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
572{
573 if (*pos == 0) /* One time is enough ;-) */
574 return seq;
575
576 return NULL;
577}
578
579static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
580{
581 (*pos)++;
582
583 return cpu_seq_start(seq, pos);
584}
585
586static void cpu_seq_stop(struct seq_file *seq, void *v)
587{
588}
589
590static const struct seq_operations cpu_seq_ops = {
591 .start = cpu_seq_start,
592 .next = cpu_seq_next,
593 .stop = cpu_seq_stop,
594 .show = cpu_seq_show,
595};
596
597static int cpu_seq_open(struct inode *inode, struct file *file)
598{
599 struct cpu_private *priv = inode->i_private;
600 struct seq_file *seq;
601 int err;
602
603 err = seq_open(file, &cpu_seq_ops);
604 if (!err) {
605 seq = file->private_data;
606 seq->private = priv;
607 }
608
609 return err;
610}
611
612static int write_msr(struct cpu_private *priv, u64 val)
613{
614 u32 low, high;
615
616 high = (val >> 32) & 0xffffffff;
617 low = val & 0xffffffff;
618
619 if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
620 return 0;
621
622 return -EPERM;
623}
624
625static int write_cpu_register(struct cpu_private *priv, const char *buf)
626{
627 int ret = -EPERM;
628 u64 val;
629
630 ret = strict_strtoull(buf, 0, &val);
631 if (ret < 0)
632 return ret;
633
634 /* Supporting only MSRs */
635 if (priv->type < CPU_TSS_BIT)
636 return write_msr(priv, val);
637
638 return ret;
639}
640
641static ssize_t cpu_write(struct file *file, const char __user *ubuf,
642 size_t count, loff_t *off)
643{
644 struct seq_file *seq = file->private_data;
645 struct cpu_private *priv = seq->private;
646 char buf[19];
647
648 if ((priv == NULL) || (count >= sizeof(buf)))
649 return -EINVAL;
650
651 if (copy_from_user(&buf, ubuf, count))
652 return -EFAULT;
653
654 buf[count] = 0;
655
656 if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
657 if (!write_cpu_register(priv, buf))
658 return count;
659
660 return -EACCES;
661}
662
663static const struct file_operations cpu_fops = {
664 .owner = THIS_MODULE,
665 .open = cpu_seq_open,
666 .read = seq_read,
667 .write = cpu_write,
668 .llseek = seq_lseek,
669 .release = seq_release,
670};
671
672static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
673 unsigned file, struct dentry *dentry)
674{
675 struct cpu_private *priv = NULL;
676
677 /* Already initialized */
678 if (file == CPU_INDEX_BIT)
679 if (per_cpu(cpu_arr[type].init, cpu))
680 return 0;
681
682 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
683 if (priv == NULL)
684 return -ENOMEM;
685
686 priv->cpu = cpu;
687 priv->type = type;
688 priv->reg = reg;
689 priv->file = file;
690 mutex_lock(&cpu_debug_lock);
691 per_cpu(priv_arr[type], cpu) = priv;
692 per_cpu(cpu_priv_count, cpu)++;
693 mutex_unlock(&cpu_debug_lock);
694
695 if (file)
696 debugfs_create_file(cpu_file[file].name, S_IRUGO,
697 dentry, (void *)priv, &cpu_fops);
698 else {
699 debugfs_create_file(cpu_base[type].name, S_IRUGO,
700 per_cpu(cpu_arr[type].dentry, cpu),
701 (void *)priv, &cpu_fops);
702 mutex_lock(&cpu_debug_lock);
703 per_cpu(cpu_arr[type].init, cpu) = 1;
704 mutex_unlock(&cpu_debug_lock);
705 }
706
707 return 0;
708}
709
710static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
711 struct dentry *dentry)
712{
713 unsigned file;
714 int err = 0;
715
716 for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
717 err = cpu_create_file(cpu, type, reg, file, dentry);
718 if (err)
719 return err;
720 }
721
722 return err;
723}
724
725static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
726{
727 struct dentry *cpu_dentry = NULL;
728 unsigned reg, reg_min, reg_max;
729 int i, range, err = 0;
730 char reg_dir[12];
731 u32 low, high;
732
733 range = get_cpu_range_count(cpu);
734
735 for (i = 0; i < range; i++) {
736 if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
737 cpu_base[type].flag))
738 continue;
739
740 for (reg = reg_min; reg <= reg_max; reg++) {
741 if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
742 continue;
743
744 sprintf(reg_dir, "0x%x", reg);
745 cpu_dentry = debugfs_create_dir(reg_dir, dentry);
746 err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
747 if (err)
748 return err;
749 }
750 }
751
752 return err;
753}
754
755static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
756{
757 struct dentry *cpu_dentry = NULL;
758 unsigned type;
759 int err = 0;
760
761 for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
762 if (!is_typeflag_valid(cpu, cpu_base[type].flag))
763 continue;
764 cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
765 per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
766
767 if (type < CPU_TSS_BIT)
768 err = cpu_init_msr(cpu, type, cpu_dentry);
769 else
770 err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
771 cpu_dentry);
772 if (err)
773 return err;
774 }
775
776 return err;
777}
778
779static int cpu_init_cpu(void)
780{
781 struct dentry *cpu_dentry = NULL;
782 struct cpuinfo_x86 *cpui;
783 char cpu_dir[12];
784 unsigned cpu;
785 int err = 0;
786
787 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
788 cpui = &cpu_data(cpu);
789 if (!cpu_has(cpui, X86_FEATURE_MSR))
790 continue;
791 per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
792 (cpui->x86 << 8) |
793 (cpui->x86_model));
794 per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);
795
796 sprintf(cpu_dir, "cpu%d", cpu);
797 cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
798 err = cpu_init_allreg(cpu, cpu_dentry);
799
800 pr_info("cpu%d(%d) debug files %d\n",
801 cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
802 if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
803 pr_err("Register files count %d exceeds limit %d\n",
804 per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
805 per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
806 err = -ENFILE;
807 }
808 if (err)
809 return err;
810 }
811
812 return err;
813}
814
815static int __init cpu_debug_init(void)
816{
817 cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);
818
819 return cpu_init_cpu();
820}
821
822static void __exit cpu_debug_exit(void)
823{
824 int i, cpu;
825
826 if (cpu_debugfs_dir)
827 debugfs_remove_recursive(cpu_debugfs_dir);
828
829 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
830 for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
831 kfree(per_cpu(priv_arr[i], cpu));
832}
833
834module_init(cpu_debug_init);
835module_exit(cpu_debug_exit);
836
837MODULE_AUTHOR("Jaswinder Singh Rajput");
838MODULE_DESCRIPTION("CPU Debug module");
839MODULE_LICENSE("GPL");
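
Note: the file above only builds the debugfs hierarchy; consuming it is left to userspace. A hypothetical userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and arch_debugfs_dir is the "x86" directory, so the exact path below is an assumption, not part of this patch. It reads the "value" file for MSR 0x1b (the APIC base MSR) on cpu0:

	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		/* Path is hypothetical; layout follows cpu_init_cpu() ->
		 * cpu_init_allreg() -> cpu_init_msr() above. */
		FILE *f = fopen("/sys/kernel/debug/x86/cpu/cpu0/apic/0x1b/value", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		fclose(f);
		return 0;
	}
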
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 4b1c319d30c..22590cf688a 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -601,7 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
601 if (!data) 601 if (!data)
602 return -ENOMEM; 602 return -ENOMEM;
603 603
604 data->acpi_data = percpu_ptr(acpi_perf_data, cpu); 604 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
605 per_cpu(drv_data, cpu) = data; 605 per_cpu(drv_data, cpu) = data;
606 606
607 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) 607 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
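
Note: this one-liner tracks the per-CPU API: the dynamic accessor is per_cpu_ptr(), which takes an alloc_percpu() pointer plus a CPU number. A minimal sketch of the pattern, with "counters" as an illustrative name rather than one from this patch:

	static int percpu_example(void)
	{
		int *counters = alloc_percpu(int);

		if (!counters)
			return -ENOMEM;

		/* per_cpu_ptr() yields the instance belonging to cpu 0 */
		*per_cpu_ptr(counters, 0) = 1;

		free_percpu(counters);
		return 0;
	}
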
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
index c2f930d8664..41ab3f064cb 100644
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -204,12 +204,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
204 } 204 }
205 /* Enable Enhanced PowerSaver */ 205 /* Enable Enhanced PowerSaver */
206 rdmsrl(MSR_IA32_MISC_ENABLE, val); 206 rdmsrl(MSR_IA32_MISC_ENABLE, val);
207 if (!(val & 1 << 16)) { 207 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
208 val |= 1 << 16; 208 val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
209 wrmsrl(MSR_IA32_MISC_ENABLE, val); 209 wrmsrl(MSR_IA32_MISC_ENABLE, val);
210 /* Can be locked at 0 */ 210 /* Can be locked at 0 */
211 rdmsrl(MSR_IA32_MISC_ENABLE, val); 211 rdmsrl(MSR_IA32_MISC_ENABLE, val);
212 if (!(val & 1 << 16)) { 212 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
213 printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n"); 213 printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
214 return -ENODEV; 214 return -ENODEV;
215 } 215 }
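
Note: both rdmsrl() checks above now test a named constant instead of the magic 1 << 16. Bit 16 of IA32_MISC_ENABLE gates Enhanced Intel SpeedStep; the definition this hunk relies on (added to asm/msr-index.h elsewhere in this series) presumably reads:

	/* Bit 16 of IA32_MISC_ENABLE: Enhanced Intel SpeedStep enable */
	#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP	(1ULL << 16)
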
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index f08998278a3..c9f1fdc0283 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -390,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
390 enable it if not. */ 390 enable it if not. */
391 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 391 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
392 392
393 if (!(l & (1<<16))) { 393 if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
394 l |= (1<<16); 394 l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
395 dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); 395 dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
396 wrmsr(MSR_IA32_MISC_ENABLE, l, h); 396 wrmsr(MSR_IA32_MISC_ENABLE, l, h);
397 397
398 /* check to see if it stuck */ 398 /* check to see if it stuck */
399 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 399 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
400 if (!(l & (1<<16))) { 400 if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
401 printk(KERN_INFO PFX 401 printk(KERN_INFO PFX
402 "couldn't enable Enhanced SpeedStep\n"); 402 "couldn't enable Enhanced SpeedStep\n");
403 return -ENODEV; 403 return -ENODEV;
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index ffd0f5ed071..593171e967e 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -61,23 +61,23 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
61 */ 61 */
62static unsigned char Cx86_dir0_msb __cpuinitdata = 0; 62static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
63 63
64static char Cx86_model[][9] __cpuinitdata = { 64static const char __cpuinitconst Cx86_model[][9] = {
65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", 65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
66 "M II ", "Unknown" 66 "M II ", "Unknown"
67}; 67};
68static char Cx486_name[][5] __cpuinitdata = { 68static const char __cpuinitconst Cx486_name[][5] = {
69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", 69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
70 "SRx2", "DRx2" 70 "SRx2", "DRx2"
71}; 71};
72static char Cx486S_name[][4] __cpuinitdata = { 72static const char __cpuinitconst Cx486S_name[][4] = {
73 "S", "S2", "Se", "S2e" 73 "S", "S2", "Se", "S2e"
74}; 74};
75static char Cx486D_name[][4] __cpuinitdata = { 75static const char __cpuinitconst Cx486D_name[][4] = {
76 "DX", "DX2", "?", "?", "?", "DX4" 76 "DX", "DX2", "?", "?", "?", "DX4"
77}; 77};
78static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock"; 78static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
79static char cyrix_model_mult1[] __cpuinitdata = "12??43"; 79static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
80static char cyrix_model_mult2[] __cpuinitdata = "12233445"; 80static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
81 81
82/* 82/*
83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old 83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -435,7 +435,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
435 } 435 }
436} 436}
437 437
438static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { 438static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
439 .c_vendor = "Cyrix", 439 .c_vendor = "Cyrix",
440 .c_ident = { "CyrixInstead" }, 440 .c_ident = { "CyrixInstead" },
441 .c_early_init = early_init_cyrix, 441 .c_early_init = early_init_cyrix,
@@ -446,7 +446,7 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
446 446
447cpu_dev_register(cyrix_cpu_dev); 447cpu_dev_register(cyrix_cpu_dev);
448 448
449static struct cpu_dev nsc_cpu_dev __cpuinitdata = { 449static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
450 .c_vendor = "NSC", 450 .c_vendor = "NSC",
451 .c_ident = { "Geode by NSC" }, 451 .c_ident = { "Geode by NSC" },
452 .c_init = init_nsc, 452 .c_init = init_nsc,
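
Note: the annotation changes above move the Cyrix name tables from writable CPU-init data to read-only CPU-init memory; both sections can be discarded after boot when CPU hotplug is disabled. Roughly, from include/linux/init.h of this era (simplified; the exact conditional guards are an assumption):

	#define __cpuinitdata	__section(.cpuinit.data)
	#define __cpuinitconst	__section(.cpuinit.rodata)
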
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 24ff26a38ad..b09d4eb52bb 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -13,6 +13,7 @@
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14#include <asm/ds.h> 14#include <asm/ds.h>
15#include <asm/bugs.h> 15#include <asm/bugs.h>
16#include <asm/cpu.h>
16 17
17#ifdef CONFIG_X86_64 18#ifdef CONFIG_X86_64
18#include <asm/topology.h> 19#include <asm/topology.h>
@@ -24,7 +25,6 @@
24#ifdef CONFIG_X86_LOCAL_APIC 25#ifdef CONFIG_X86_LOCAL_APIC
25#include <asm/mpspec.h> 26#include <asm/mpspec.h>
26#include <asm/apic.h> 27#include <asm/apic.h>
27#include <mach_apic.h>
28#endif 28#endif
29 29
30static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) 30static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
@@ -54,6 +54,11 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
54 c->x86_cache_alignment = 128; 54 c->x86_cache_alignment = 128;
55#endif 55#endif
56 56
57 /* CPUID workaround for 0F33/0F34 CPU */
58 if (c->x86 == 0xF && c->x86_model == 0x3
59 && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
60 c->x86_phys_bits = 36;
61
57 /* 62 /*
58 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate 63 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
59 * with P/T states and does not stop in deep C-states 64 * with P/T states and does not stop in deep C-states
@@ -63,6 +68,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
63 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); 68 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
64 } 69 }
65 70
71 /*
72 * There is a known erratum on Pentium III and Core Solo
73 * and Core Duo CPUs.
74 * " Page with PAT set to WC while associated MTRR is UC
75 * may consolidate to UC "
76 * Because of this erratum, it is better to stick with
77 * setting WC in MTRR rather than using PAT on these CPUs.
78 *
79 * Enable PAT WC only on P4, Core 2 or later CPUs.
80 */
81 if (c->x86 == 6 && c->x86_model < 15)
82 clear_cpu_cap(c, X86_FEATURE_PAT);
66} 83}
67 84
68#ifdef CONFIG_X86_32 85#ifdef CONFIG_X86_32
@@ -99,6 +116,28 @@ static void __cpuinit trap_init_f00f_bug(void)
99} 116}
100#endif 117#endif
101 118
119static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
120{
121#ifdef CONFIG_SMP
122 /* called from identify_secondary_cpu()? */
123 if (c->cpu_index == boot_cpu_id)
124 return;
125
126 /*
127 * Mask B, Pentium, but not Pentium MMX
128 */
129 if (c->x86 == 5 &&
130 c->x86_mask >= 1 && c->x86_mask <= 4 &&
131 c->x86_model <= 3) {
132 /*
133 * Remember we have B step Pentia with bugs
134 */
135 WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
136 "with B stepping processors.\n");
137 }
138#endif
139}
140
102static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) 141static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
103{ 142{
104 unsigned long lo, hi; 143 unsigned long lo, hi;
@@ -135,10 +174,10 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
135 */ 174 */
136 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { 175 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
137 rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); 176 rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
138 if ((lo & (1<<9)) == 0) { 177 if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
139 printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); 178 printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
140 printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); 179 printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
141 lo |= (1<<9); /* Disable hw prefetching */ 180 lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
142 wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); 181 wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
143 } 182 }
144 } 183 }
@@ -175,6 +214,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
175#ifdef CONFIG_X86_NUMAQ 214#ifdef CONFIG_X86_NUMAQ
176 numaq_tsc_disable(); 215 numaq_tsc_disable();
177#endif 216#endif
217
218 intel_smp_check(c);
178} 219}
179#else 220#else
180static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) 221static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
@@ -374,7 +415,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
374} 415}
375#endif 416#endif
376 417
377static struct cpu_dev intel_cpu_dev __cpuinitdata = { 418static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
378 .c_vendor = "Intel", 419 .c_vendor = "Intel",
379 .c_ident = { "GenuineIntel" }, 420 .c_ident = { "GenuineIntel" },
380#ifdef CONFIG_X86_32 421#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index da299eb85fc..c471eb1a389 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -32,7 +32,7 @@ struct _cache_table
32}; 32};
33 33
34/* all the cache descriptor types we care about (no TLB or trace cache entries) */ 34/* all the cache descriptor types we care about (no TLB or trace cache entries) */
35static struct _cache_table cache_table[] __cpuinitdata = 35static const struct _cache_table __cpuinitconst cache_table[] =
36{ 36{
37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ 38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -147,7 +147,16 @@ struct _cpuid4_info {
147 union _cpuid4_leaf_ecx ecx; 147 union _cpuid4_leaf_ecx ecx;
148 unsigned long size; 148 unsigned long size;
149 unsigned long can_disable; 149 unsigned long can_disable;
150 cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ 150 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
151};
152
153/* subset of above _cpuid4_info w/o shared_cpu_map */
154struct _cpuid4_info_regs {
155 union _cpuid4_leaf_eax eax;
156 union _cpuid4_leaf_ebx ebx;
157 union _cpuid4_leaf_ecx ecx;
158 unsigned long size;
159 unsigned long can_disable;
151}; 160};
152 161
153#ifdef CONFIG_PCI 162#ifdef CONFIG_PCI
@@ -197,15 +206,15 @@ union l3_cache {
197 unsigned val; 206 unsigned val;
198}; 207};
199 208
200static unsigned short assocs[] __cpuinitdata = { 209static const unsigned short __cpuinitconst assocs[] = {
201 [1] = 1, [2] = 2, [4] = 4, [6] = 8, 210 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
202 [8] = 16, [0xa] = 32, [0xb] = 48, 211 [8] = 16, [0xa] = 32, [0xb] = 48,
203 [0xc] = 64, 212 [0xc] = 64,
204 [0xf] = 0xffff // ?? 213 [0xf] = 0xffff // ??
205}; 214};
206 215
207static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; 216static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
208static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; 217static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
209 218
210static void __cpuinit 219static void __cpuinit
211amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, 220amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
@@ -278,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
278} 287}
279 288
280static void __cpuinit 289static void __cpuinit
281amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) 290amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
282{ 291{
283 if (index < 3) 292 if (index < 3)
284 return; 293 return;
@@ -286,7 +295,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
286} 295}
287 296
288static int 297static int
289__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) 298__cpuinit cpuid4_cache_lookup_regs(int index,
299 struct _cpuid4_info_regs *this_leaf)
290{ 300{
291 union _cpuid4_leaf_eax eax; 301 union _cpuid4_leaf_eax eax;
292 union _cpuid4_leaf_ebx ebx; 302 union _cpuid4_leaf_ebx ebx;
@@ -314,6 +324,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
314 return 0; 324 return 0;
315} 325}
316 326
327static int
328__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
329{
330 struct _cpuid4_info_regs *leaf_regs =
331 (struct _cpuid4_info_regs *)this_leaf;
332
333 return cpuid4_cache_lookup_regs(index, leaf_regs);
334}
335
317static int __cpuinit find_num_cache_leaves(void) 336static int __cpuinit find_num_cache_leaves(void)
318{ 337{
319 unsigned int eax, ebx, ecx, edx; 338 unsigned int eax, ebx, ecx, edx;
@@ -353,11 +372,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
353 * parameters cpuid leaf to find the cache details 372 * parameters cpuid leaf to find the cache details
354 */ 373 */
355 for (i = 0; i < num_cache_leaves; i++) { 374 for (i = 0; i < num_cache_leaves; i++) {
356 struct _cpuid4_info this_leaf; 375 struct _cpuid4_info_regs this_leaf;
357
358 int retval; 376 int retval;
359 377
360 retval = cpuid4_cache_lookup(i, &this_leaf); 378 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
361 if (retval >= 0) { 379 if (retval >= 0) {
362 switch(this_leaf.eax.split.level) { 380 switch(this_leaf.eax.split.level) {
363 case 1: 381 case 1:
@@ -506,17 +524,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
506 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; 524 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
507 525
508 if (num_threads_sharing == 1) 526 if (num_threads_sharing == 1)
509 cpu_set(cpu, this_leaf->shared_cpu_map); 527 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
510 else { 528 else {
511 index_msb = get_count_order(num_threads_sharing); 529 index_msb = get_count_order(num_threads_sharing);
512 530
513 for_each_online_cpu(i) { 531 for_each_online_cpu(i) {
514 if (cpu_data(i).apicid >> index_msb == 532 if (cpu_data(i).apicid >> index_msb ==
515 c->apicid >> index_msb) { 533 c->apicid >> index_msb) {
516 cpu_set(i, this_leaf->shared_cpu_map); 534 cpumask_set_cpu(i,
535 to_cpumask(this_leaf->shared_cpu_map));
517 if (i != cpu && per_cpu(cpuid4_info, i)) { 536 if (i != cpu && per_cpu(cpuid4_info, i)) {
518 sibling_leaf = CPUID4_INFO_IDX(i, index); 537 sibling_leaf =
519 cpu_set(cpu, sibling_leaf->shared_cpu_map); 538 CPUID4_INFO_IDX(i, index);
539 cpumask_set_cpu(cpu, to_cpumask(
540 sibling_leaf->shared_cpu_map));
520 } 541 }
521 } 542 }
522 } 543 }
@@ -528,9 +549,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
528 int sibling; 549 int sibling;
529 550
530 this_leaf = CPUID4_INFO_IDX(cpu, index); 551 this_leaf = CPUID4_INFO_IDX(cpu, index);
531 for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { 552 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
532 sibling_leaf = CPUID4_INFO_IDX(sibling, index); 553 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
533 cpu_clear(cpu, sibling_leaf->shared_cpu_map); 554 cpumask_clear_cpu(cpu,
555 to_cpumask(sibling_leaf->shared_cpu_map));
534 } 556 }
535} 557}
536#else 558#else
@@ -635,8 +657,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
635 int n = 0; 657 int n = 0;
636 658
637 if (len > 1) { 659 if (len > 1) {
638 cpumask_t *mask = &this_leaf->shared_cpu_map; 660 const struct cpumask *mask;
639 661
662 mask = to_cpumask(this_leaf->shared_cpu_map);
640 n = type? 663 n = type?
641 cpulist_scnprintf(buf, len-2, mask) : 664 cpulist_scnprintf(buf, len-2, mask) :
642 cpumask_scnprintf(buf, len-2, mask); 665 cpumask_scnprintf(buf, len-2, mask);
@@ -699,7 +722,8 @@ static struct pci_dev *get_k8_northbridge(int node)
699 722
700static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) 723static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
701{ 724{
702 int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); 725 const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
726 int node = cpu_to_node(cpumask_first(mask));
703 struct pci_dev *dev = NULL; 727 struct pci_dev *dev = NULL;
704 ssize_t ret = 0; 728 ssize_t ret = 0;
705 int i; 729 int i;
@@ -733,7 +757,8 @@ static ssize_t
733store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, 757store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
734 size_t count) 758 size_t count)
735{ 759{
736 int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); 760 const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
761 int node = cpu_to_node(cpumask_first(mask));
737 struct pci_dev *dev = NULL; 762 struct pci_dev *dev = NULL;
738 unsigned int ret, index, val; 763 unsigned int ret, index, val;
739 764
@@ -878,7 +903,7 @@ err_out:
878 return -ENOMEM; 903 return -ENOMEM;
879} 904}
880 905
881static cpumask_t cache_dev_map = CPU_MASK_NONE; 906static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
882 907
883/* Add/Remove cache interface for CPU device */ 908/* Add/Remove cache interface for CPU device */
884static int __cpuinit cache_add_dev(struct sys_device * sys_dev) 909static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -918,7 +943,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
918 } 943 }
919 kobject_uevent(&(this_object->kobj), KOBJ_ADD); 944 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
920 } 945 }
921 cpu_set(cpu, cache_dev_map); 946 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
922 947
923 kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); 948 kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
924 return 0; 949 return 0;
@@ -931,9 +956,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
931 956
932 if (per_cpu(cpuid4_info, cpu) == NULL) 957 if (per_cpu(cpuid4_info, cpu) == NULL)
933 return; 958 return;
934 if (!cpu_isset(cpu, cache_dev_map)) 959 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
935 return; 960 return;
936 cpu_clear(cpu, cache_dev_map); 961 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
937 962
938 for (i = 0; i < num_cache_leaves; i++) 963 for (i = 0; i < num_cache_leaves; i++)
939 kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); 964 kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
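
Note: the cacheinfo hunks above convert cpumask_t members to DECLARE_BITMAP(..., NR_CPUS) plus to_cpumask(): DECLARE_BITMAP() gives an unsigned long array sized for NR_CPUS bits, and to_cpumask() reinterprets it as a struct cpumask for the cpumask_* accessors. A minimal sketch of the pattern, with illustrative names not taken from this patch:

	static DECLARE_BITMAP(example_map, NR_CPUS);

	static void example_mark_cpu(unsigned int cpu)
	{
		/* Set, test and clear a bit through the cpumask accessors */
		cpumask_set_cpu(cpu, to_cpumask(example_map));

		if (cpumask_test_cpu(cpu, to_cpumask(example_map)))
			cpumask_clear_cpu(cpu, to_cpumask(example_map));
	}
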
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index d7d2323bbb6..b2f89829bbe 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o
4obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o 4obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o
5obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o 5obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o
6obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o 6obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
7obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index dfaebce3633..3552119b091 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
@@ -60,20 +60,6 @@ void mcheck_init(struct cpuinfo_x86 *c)
60 } 60 }
61} 61}
62 62
63static unsigned long old_cr4 __initdata;
64
65void __init stop_mce(void)
66{
67 old_cr4 = read_cr4();
68 clear_in_cr4(X86_CR4_MCE);
69}
70
71void __init restart_mce(void)
72{
73 if (old_cr4 & X86_CR4_MCE)
74 set_in_cr4(X86_CR4_MCE);
75}
76
77static int __init mcheck_disable(char *str) 63static int __init mcheck_disable(char *str)
78{ 64{
79 mce_disabled = 1; 65 mce_disabled = 1;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index fe79985ce0f..ca14604611e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -3,6 +3,8 @@
3 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. 3 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
4 * Rest from unknown author(s). 4 * Rest from unknown author(s).
5 * 2004 Andi Kleen. Rewrote most of it. 5 * 2004 Andi Kleen. Rewrote most of it.
6 * Copyright 2008 Intel Corporation
7 * Author: Andi Kleen
6 */ 8 */
7 9
8#include <linux/init.h> 10#include <linux/init.h>
@@ -24,6 +26,9 @@
24#include <linux/ctype.h> 26#include <linux/ctype.h>
25#include <linux/kmod.h> 27#include <linux/kmod.h>
26#include <linux/kdebug.h> 28#include <linux/kdebug.h>
29#include <linux/kobject.h>
30#include <linux/sysfs.h>
31#include <linux/ratelimit.h>
27#include <asm/processor.h> 32#include <asm/processor.h>
28#include <asm/msr.h> 33#include <asm/msr.h>
29#include <asm/mce.h> 34#include <asm/mce.h>
@@ -32,7 +37,6 @@
32#include <asm/idle.h> 37#include <asm/idle.h>
33 38
34#define MISC_MCELOG_MINOR 227 39#define MISC_MCELOG_MINOR 227
35#define NR_SYSFS_BANKS 6
36 40
37atomic_t mce_entry; 41atomic_t mce_entry;
38 42
@@ -47,7 +51,7 @@ static int mce_dont_init;
47 */ 51 */
48static int tolerant = 1; 52static int tolerant = 1;
49static int banks; 53static int banks;
50static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL }; 54static u64 *bank;
51static unsigned long notify_user; 55static unsigned long notify_user;
52static int rip_msr; 56static int rip_msr;
53static int mce_bootlog = -1; 57static int mce_bootlog = -1;
@@ -58,6 +62,19 @@ static char *trigger_argv[2] = { trigger, NULL };
58 62
59static DECLARE_WAIT_QUEUE_HEAD(mce_wait); 63static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
60 64
65/* MCA banks polled by the period polling timer for corrected events */
66DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
67 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
68};
69
70/* Do initial initialization of a struct mce */
71void mce_setup(struct mce *m)
72{
73 memset(m, 0, sizeof(struct mce));
74 m->cpu = smp_processor_id();
75 rdtscll(m->tsc);
76}
77
61/* 78/*
62 * Lockless MCE logging infrastructure. 79 * Lockless MCE logging infrastructure.
63 * This avoids deadlocks on printk locks without having to break locks. Also 80 * This avoids deadlocks on printk locks without having to break locks. Also
@@ -119,11 +136,11 @@ static void print_mce(struct mce *m)
119 print_symbol("{%s}", m->ip); 136 print_symbol("{%s}", m->ip);
120 printk("\n"); 137 printk("\n");
121 } 138 }
122 printk(KERN_EMERG "TSC %Lx ", m->tsc); 139 printk(KERN_EMERG "TSC %llx ", m->tsc);
123 if (m->addr) 140 if (m->addr)
124 printk("ADDR %Lx ", m->addr); 141 printk("ADDR %llx ", m->addr);
125 if (m->misc) 142 if (m->misc)
126 printk("MISC %Lx ", m->misc); 143 printk("MISC %llx ", m->misc);
127 printk("\n"); 144 printk("\n");
128 printk(KERN_EMERG "This is not a software problem!\n"); 145 printk(KERN_EMERG "This is not a software problem!\n");
129 printk(KERN_EMERG "Run through mcelog --ascii to decode " 146 printk(KERN_EMERG "Run through mcelog --ascii to decode "
@@ -149,8 +166,10 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
149 panic(msg); 166 panic(msg);
150} 167}
151 168
152static int mce_available(struct cpuinfo_x86 *c) 169int mce_available(struct cpuinfo_x86 *c)
153{ 170{
171 if (mce_dont_init)
172 return 0;
154 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); 173 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
155} 174}
156 175
@@ -172,7 +191,77 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
172} 191}
173 192
174/* 193/*
175 * The actual machine check handler 194 * Poll for corrected events or events that happened before reset.
195 * Those are just logged through /dev/mcelog.
196 *
197 * This is executed in standard interrupt context.
198 */
199void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
200{
201 struct mce m;
202 int i;
203
204 mce_setup(&m);
205
206 rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
207 for (i = 0; i < banks; i++) {
208 if (!bank[i] || !test_bit(i, *b))
209 continue;
210
211 m.misc = 0;
212 m.addr = 0;
213 m.bank = i;
214 m.tsc = 0;
215
216 barrier();
217 rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
218 if (!(m.status & MCI_STATUS_VAL))
219 continue;
220
221 /*
222 * Uncorrected events are handled by the exception handler
223 * when it is enabled. But when the exception is disabled log
224 * everything.
225 *
226 * TBD do the same check for MCI_STATUS_EN here?
227 */
228 if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC))
229 continue;
230
231 if (m.status & MCI_STATUS_MISCV)
232 rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
233 if (m.status & MCI_STATUS_ADDRV)
234 rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
235
236 if (!(flags & MCP_TIMESTAMP))
237 m.tsc = 0;
238 /*
239 * Don't get the IP here because it's unlikely to
240 * have anything to do with the actual error location.
241 */
242
243 mce_log(&m);
244 add_taint(TAINT_MACHINE_CHECK);
245
246 /*
247 * Clear state for this bank.
248 */
249 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
250 }
251
252 /*
253 * Don't clear MCG_STATUS here because it's only defined for
254 * exceptions.
255 */
256}
257
258/*
259 * The actual machine check handler. This only handles real
260 * exceptions when something got corrupted coming in through int 18.
261 *
262 * This is executed in NMI context not subject to normal locking rules. This
263 * implies that most kernel services cannot be safely used. Don't even
264 * think about putting a printk in there!
176 */ 265 */
177void do_machine_check(struct pt_regs * regs, long error_code) 266void do_machine_check(struct pt_regs * regs, long error_code)
178{ 267{
@@ -190,17 +279,18 @@ void do_machine_check(struct pt_regs * regs, long error_code)
190 * error. 279 * error.
191 */ 280 */
192 int kill_it = 0; 281 int kill_it = 0;
282 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
193 283
194 atomic_inc(&mce_entry); 284 atomic_inc(&mce_entry);
195 285
196 if ((regs 286 if (notify_die(DIE_NMI, "machine check", regs, error_code,
197 && notify_die(DIE_NMI, "machine check", regs, error_code,
198 18, SIGKILL) == NOTIFY_STOP) 287 18, SIGKILL) == NOTIFY_STOP)
199 || !banks) 288 goto out2;
289 if (!banks)
200 goto out2; 290 goto out2;
201 291
202 memset(&m, 0, sizeof(struct mce)); 292 mce_setup(&m);
203 m.cpu = smp_processor_id(); 293
204 rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); 294 rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
205 /* if the restart IP is not valid, we're done for */ 295 /* if the restart IP is not valid, we're done for */
206 if (!(m.mcgstatus & MCG_STATUS_RIPV)) 296 if (!(m.mcgstatus & MCG_STATUS_RIPV))
@@ -210,18 +300,32 @@ void do_machine_check(struct pt_regs * regs, long error_code)
210 barrier(); 300 barrier();
211 301
212 for (i = 0; i < banks; i++) { 302 for (i = 0; i < banks; i++) {
213 if (i < NR_SYSFS_BANKS && !bank[i]) 303 __clear_bit(i, toclear);
304 if (!bank[i])
214 continue; 305 continue;
215 306
216 m.misc = 0; 307 m.misc = 0;
217 m.addr = 0; 308 m.addr = 0;
218 m.bank = i; 309 m.bank = i;
219 m.tsc = 0;
220 310
221 rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status); 311 rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
222 if ((m.status & MCI_STATUS_VAL) == 0) 312 if ((m.status & MCI_STATUS_VAL) == 0)
223 continue; 313 continue;
224 314
315 /*
316 * Non-uncorrected errors are handled by machine_check_poll.
317 * Leave them alone.
318 */
319 if ((m.status & MCI_STATUS_UC) == 0)
320 continue;
321
322 /*
323 * Set taint even when machine check was not enabled.
324 */
325 add_taint(TAINT_MACHINE_CHECK);
326
327 __set_bit(i, toclear);
328
225 if (m.status & MCI_STATUS_EN) { 329 if (m.status & MCI_STATUS_EN) {
226 /* if PCC was set, there's no way out */ 330 /* if PCC was set, there's no way out */
227 no_way_out |= !!(m.status & MCI_STATUS_PCC); 331 no_way_out |= !!(m.status & MCI_STATUS_PCC);
@@ -235,6 +339,12 @@ void do_machine_check(struct pt_regs * regs, long error_code)
235 no_way_out = 1; 339 no_way_out = 1;
236 kill_it = 1; 340 kill_it = 1;
237 } 341 }
342 } else {
343 /*
344 * Machine check event was not enabled. Clear, but
345 * ignore.
346 */
347 continue;
238 } 348 }
239 349
240 if (m.status & MCI_STATUS_MISCV) 350 if (m.status & MCI_STATUS_MISCV)
@@ -243,10 +353,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
243 rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr); 353 rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
244 354
245 mce_get_rip(&m, regs); 355 mce_get_rip(&m, regs);
246 if (error_code >= 0) 356 mce_log(&m);
247 rdtscll(m.tsc);
248 if (error_code != -2)
249 mce_log(&m);
250 357
251 /* Did this bank cause the exception? */ 358 /* Did this bank cause the exception? */
252 /* Assume that the bank with uncorrectable errors did it, 359 /* Assume that the bank with uncorrectable errors did it,
@@ -255,14 +362,8 @@ void do_machine_check(struct pt_regs * regs, long error_code)
255 panicm = m; 362 panicm = m;
256 panicm_found = 1; 363 panicm_found = 1;
257 } 364 }
258
259 add_taint(TAINT_MACHINE_CHECK);
260 } 365 }
261 366
262 /* Never do anything final in the polling timer */
263 if (!regs)
264 goto out;
265
266 /* If we didn't find an uncorrectable error, pick 367 /* If we didn't find an uncorrectable error, pick
267 the last one (shouldn't happen, just being safe). */ 368 the last one (shouldn't happen, just being safe). */
268 if (!panicm_found) 369 if (!panicm_found)
@@ -309,10 +410,11 @@ void do_machine_check(struct pt_regs * regs, long error_code)
309 /* notify userspace ASAP */ 410 /* notify userspace ASAP */
310 set_thread_flag(TIF_MCE_NOTIFY); 411 set_thread_flag(TIF_MCE_NOTIFY);
311 412
312 out:
313 /* the last thing we do is clear state */ 413 /* the last thing we do is clear state */
314 for (i = 0; i < banks; i++) 414 for (i = 0; i < banks; i++) {
315 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); 415 if (test_bit(i, toclear))
416 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
417 }
316 wrmsrl(MSR_IA32_MCG_STATUS, 0); 418 wrmsrl(MSR_IA32_MCG_STATUS, 0);
317 out2: 419 out2:
318 atomic_dec(&mce_entry); 420 atomic_dec(&mce_entry);
@@ -332,15 +434,13 @@ void do_machine_check(struct pt_regs * regs, long error_code)
332 * and historically has been the register value of the 434 * and historically has been the register value of the
333 * MSR_IA32_THERMAL_STATUS (Intel) msr. 435 * MSR_IA32_THERMAL_STATUS (Intel) msr.
334 */ 436 */
335void mce_log_therm_throt_event(unsigned int cpu, __u64 status) 437void mce_log_therm_throt_event(__u64 status)
336{ 438{
337 struct mce m; 439 struct mce m;
338 440
339 memset(&m, 0, sizeof(m)); 441 mce_setup(&m);
340 m.cpu = cpu;
341 m.bank = MCE_THERMAL_BANK; 442 m.bank = MCE_THERMAL_BANK;
342 m.status = status; 443 m.status = status;
343 rdtscll(m.tsc);
344 mce_log(&m); 444 mce_log(&m);
345} 445}
346#endif /* CONFIG_X86_MCE_INTEL */ 446#endif /* CONFIG_X86_MCE_INTEL */
@@ -353,18 +453,18 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
353 453
354static int check_interval = 5 * 60; /* 5 minutes */ 454static int check_interval = 5 * 60; /* 5 minutes */
355static int next_interval; /* in jiffies */ 455static int next_interval; /* in jiffies */
356static void mcheck_timer(struct work_struct *work); 456static void mcheck_timer(unsigned long);
357static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer); 457static DEFINE_PER_CPU(struct timer_list, mce_timer);
358 458
359static void mcheck_check_cpu(void *info) 459static void mcheck_timer(unsigned long data)
360{ 460{
361 if (mce_available(&current_cpu_data)) 461 struct timer_list *t = &per_cpu(mce_timer, data);
362 do_machine_check(NULL, 0);
363}
364 462
365static void mcheck_timer(struct work_struct *work) 463 WARN_ON(smp_processor_id() != data);
366{ 464
367 on_each_cpu(mcheck_check_cpu, NULL, 1); 465 if (mce_available(&current_cpu_data))
466 machine_check_poll(MCP_TIMESTAMP,
467 &__get_cpu_var(mce_poll_banks));
368 468
369 /* 469 /*
370 * Alert userspace if needed. If we logged an MCE, reduce the 470 * Alert userspace if needed. If we logged an MCE, reduce the
@@ -377,31 +477,41 @@ static void mcheck_timer(struct work_struct *work)
377 (int)round_jiffies_relative(check_interval*HZ)); 477 (int)round_jiffies_relative(check_interval*HZ));
378 } 478 }
379 479
380 schedule_delayed_work(&mcheck_work, next_interval); 480 t->expires = jiffies + next_interval;
481 add_timer(t);
482}
483
484static void mce_do_trigger(struct work_struct *work)
485{
486 call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
381} 487}
382 488
489static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
490
383/* 491/*
384 * This is only called from process context. This is where we do 492 * Notify the user(s) about new machine check events.
385 * anything we need to alert userspace about new MCEs. This is called 493 * Can be called from interrupt context, but not from machine check/NMI
386 * directly from the poller and also from entry.S and idle, thanks to 494 * context.
387 * TIF_MCE_NOTIFY.
388 */ 495 */
389int mce_notify_user(void) 496int mce_notify_user(void)
390{ 497{
498 /* Not more than two messages every minute */
499 static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
500
391 clear_thread_flag(TIF_MCE_NOTIFY); 501 clear_thread_flag(TIF_MCE_NOTIFY);
392 if (test_and_clear_bit(0, &notify_user)) { 502 if (test_and_clear_bit(0, &notify_user)) {
393 static unsigned long last_print;
394 unsigned long now = jiffies;
395
396 wake_up_interruptible(&mce_wait); 503 wake_up_interruptible(&mce_wait);
397 if (trigger[0])
398 call_usermodehelper(trigger, trigger_argv, NULL,
399 UMH_NO_WAIT);
400 504
401 if (time_after_eq(now, last_print + (check_interval*HZ))) { 505 /*
402 last_print = now; 506 * There is no risk of missing notifications because
507 * work_pending is always cleared before the function is
508 * executed.
509 */
510 if (trigger[0] && !work_pending(&mce_trigger_work))
511 schedule_work(&mce_trigger_work);
512
513 if (__ratelimit(&ratelimit))
403 printk(KERN_INFO "Machine check events logged\n"); 514 printk(KERN_INFO "Machine check events logged\n");
404 }
405 515
406 return 1; 516 return 1;
407 } 517 }
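
Two details in the rewritten mce_notify_user() are worth isolating. The printk is throttled by a static ratelimit state (at most two messages per minute), and the user-space trigger is deferred through a workqueue because call_usermodehelper() may not be invoked from interrupt context; the work_pending() test makes re-queueing a no-op while a run is outstanding. A condensed sketch of the same shape, with illustrative names:

	#include <linux/kernel.h>
	#include <linux/ratelimit.h>
	#include <linux/workqueue.h>

	static void my_trigger(struct work_struct *w)
	{
		/* safe to run a usermode helper from here */
	}
	static DECLARE_WORK(trigger_work, my_trigger);

	static void notify(void)
	{
		static DEFINE_RATELIMIT_STATE(rs, 60 * HZ, 2);

		if (!work_pending(&trigger_work))	/* no-op while queued */
			schedule_work(&trigger_work);
		if (__ratelimit(&rs))			/* at most 2 per minute */
			printk(KERN_INFO "events logged\n");
	}
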
@@ -425,63 +535,78 @@ static struct notifier_block mce_idle_notifier = {
425 535
426static __init int periodic_mcheck_init(void) 536static __init int periodic_mcheck_init(void)
427{ 537{
428 next_interval = check_interval * HZ; 538 idle_notifier_register(&mce_idle_notifier);
429 if (next_interval) 539 return 0;
430 schedule_delayed_work(&mcheck_work,
431 round_jiffies_relative(next_interval));
432 idle_notifier_register(&mce_idle_notifier);
433 return 0;
434} 540}
435__initcall(periodic_mcheck_init); 541__initcall(periodic_mcheck_init);
436 542
437
438/* 543/*
439 * Initialize Machine Checks for a CPU. 544 * Initialize Machine Checks for a CPU.
440 */ 545 */
441static void mce_init(void *dummy) 546static int mce_cap_init(void)
442{ 547{
443 u64 cap; 548 u64 cap;
444 int i; 549 unsigned b;
445 550
446 rdmsrl(MSR_IA32_MCG_CAP, cap); 551 rdmsrl(MSR_IA32_MCG_CAP, cap);
447 banks = cap & 0xff; 552 b = cap & 0xff;
448 if (banks > MCE_EXTENDED_BANK) { 553 if (b > MAX_NR_BANKS) {
449 banks = MCE_EXTENDED_BANK; 554 printk(KERN_WARNING
450 printk(KERN_INFO "MCE: warning: using only %d banks\n", 555 "MCE: Using only %u machine check banks out of %u\n",
451 MCE_EXTENDED_BANK); 556 MAX_NR_BANKS, b);
557 b = MAX_NR_BANKS;
452 } 558 }
559
560 /* Don't support asymmetric configurations today */
561 WARN_ON(banks != 0 && b != banks);
562 banks = b;
563 if (!bank) {
564 bank = kmalloc(banks * sizeof(u64), GFP_KERNEL);
565 if (!bank)
566 return -ENOMEM;
567 memset(bank, 0xff, banks * sizeof(u64));
568 }
569
453 /* Use accurate RIP reporting if available. */ 570 /* Use accurate RIP reporting if available. */
454 if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) 571 if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
455 rip_msr = MSR_IA32_MCG_EIP; 572 rip_msr = MSR_IA32_MCG_EIP;
456 573
457 /* Log the machine checks left over from the previous reset. 574 return 0;
458 This also clears all registers */ 575}
459 do_machine_check(NULL, mce_bootlog ? -1 : -2); 576
577static void mce_init(void *dummy)
578{
579 u64 cap;
580 int i;
581 mce_banks_t all_banks;
582
583 /*
584 * Log the machine checks left over from the previous reset.
585 */
586 bitmap_fill(all_banks, MAX_NR_BANKS);
587 machine_check_poll(MCP_UC, &all_banks);
460 588
461 set_in_cr4(X86_CR4_MCE); 589 set_in_cr4(X86_CR4_MCE);
462 590
591 rdmsrl(MSR_IA32_MCG_CAP, cap);
463 if (cap & MCG_CTL_P) 592 if (cap & MCG_CTL_P)
464 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 593 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
465 594
466 for (i = 0; i < banks; i++) { 595 for (i = 0; i < banks; i++) {
467 if (i < NR_SYSFS_BANKS) 596 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
468 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
469 else
470 wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL);
471
472 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); 597 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
473 } 598 }
474} 599}
475 600
476/* Add per CPU specific workarounds here */ 601/* Add per CPU specific workarounds here */
477static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) 602static void mce_cpu_quirks(struct cpuinfo_x86 *c)
478{ 603{
479 /* This should be disabled by the BIOS, but isn't always */ 604 /* This should be disabled by the BIOS, but isn't always */
480 if (c->x86_vendor == X86_VENDOR_AMD) { 605 if (c->x86_vendor == X86_VENDOR_AMD) {
481 if(c->x86 == 15) 606 if (c->x86 == 15 && banks > 4)
482 /* disable GART TBL walk error reporting, which trips off 607 /* disable GART TBL walk error reporting, which trips off
483 incorrectly with the IOMMU & 3ware & Cerberus. */ 608 incorrectly with the IOMMU & 3ware & Cerberus. */
484 clear_bit(10, &bank[4]); 609 clear_bit(10, (unsigned long *)&bank[4]);
 485		if (c->x86 <= 17 && mce_bootlog < 0) 610		if (c->x86 <= 17 && mce_bootlog < 0)
 486			/* Lots of broken BIOSes around that don't clear them 611			/* Lots of broken BIOSes around that don't clear them
487 by default and leave crap in there. Don't log. */ 612 by default and leave crap in there. Don't log. */
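
The AMD quirk above clears bit 10 of bank 4's control word so GART TLB-walk errors are no longer reported; the cast is needed because bank[] holds u64 values while clear_bit() works on unsigned long bitmaps. The equivalent open-coded mask, shown only to make the bit arithmetic explicit:

	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15 && banks > 4)
		bank[4] &= ~(1ULL << 10);	/* GART TBL walk error bit */
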
@@ -504,20 +629,38 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
504 } 629 }
505} 630}
506 631
632static void mce_init_timer(void)
633{
634 struct timer_list *t = &__get_cpu_var(mce_timer);
635
636 /* data race harmless because everyone sets to the same value */
637 if (!next_interval)
638 next_interval = check_interval * HZ;
639 if (!next_interval)
640 return;
641 setup_timer(t, mcheck_timer, smp_processor_id());
642 t->expires = round_jiffies(jiffies + next_interval);
643 add_timer(t);
644}
645
507/* 646/*
508 * Called for each booted CPU to set up machine checks. 647 * Called for each booted CPU to set up machine checks.
509 * Must be called with preempt off. 648 * Must be called with preempt off.
510 */ 649 */
511void __cpuinit mcheck_init(struct cpuinfo_x86 *c) 650void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
512{ 651{
513 mce_cpu_quirks(c); 652 if (!mce_available(c))
653 return;
514 654
515 if (mce_dont_init || 655 if (mce_cap_init() < 0) {
516 !mce_available(c)) 656 mce_dont_init = 1;
517 return; 657 return;
658 }
659 mce_cpu_quirks(c);
518 660
519 mce_init(NULL); 661 mce_init(NULL);
520 mce_cpu_features(c); 662 mce_cpu_features(c);
663 mce_init_timer();
521} 664}
522 665
523/* 666/*
@@ -573,7 +716,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
573{ 716{
574 unsigned long *cpu_tsc; 717 unsigned long *cpu_tsc;
575 static DEFINE_MUTEX(mce_read_mutex); 718 static DEFINE_MUTEX(mce_read_mutex);
576 unsigned next; 719 unsigned prev, next;
577 char __user *buf = ubuf; 720 char __user *buf = ubuf;
578 int i, err; 721 int i, err;
579 722
@@ -592,25 +735,32 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
592 } 735 }
593 736
594 err = 0; 737 err = 0;
595 for (i = 0; i < next; i++) { 738 prev = 0;
596 unsigned long start = jiffies; 739 do {
597 740 for (i = prev; i < next; i++) {
598 while (!mcelog.entry[i].finished) { 741 unsigned long start = jiffies;
599 if (time_after_eq(jiffies, start + 2)) { 742
600 memset(mcelog.entry + i,0, sizeof(struct mce)); 743 while (!mcelog.entry[i].finished) {
601 goto timeout; 744 if (time_after_eq(jiffies, start + 2)) {
745 memset(mcelog.entry + i, 0,
746 sizeof(struct mce));
747 goto timeout;
748 }
749 cpu_relax();
602 } 750 }
603 cpu_relax(); 751 smp_rmb();
752 err |= copy_to_user(buf, mcelog.entry + i,
753 sizeof(struct mce));
754 buf += sizeof(struct mce);
755timeout:
756 ;
604 } 757 }
605 smp_rmb();
606 err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
607 buf += sizeof(struct mce);
608 timeout:
609 ;
610 }
611 758
612 memset(mcelog.entry, 0, next * sizeof(struct mce)); 759 memset(mcelog.entry + prev, 0,
613 mcelog.next = 0; 760 (next - prev) * sizeof(struct mce));
761 prev = next;
762 next = cmpxchg(&mcelog.next, prev, 0);
763 } while (next != prev);
614 764
615 synchronize_sched(); 765 synchronize_sched();
616 766
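
The rewritten read loop drains mcelog without locking out writers: it copies everything up to the current next index, then attempts to swap next back to 0 with cmpxchg(). If another CPU logged entries in the meantime, the swap fails, returns the newer index, and the loop takes another pass starting at prev. A stripped-down sketch of the drain, assuming a writer that only ever increments next:

	unsigned prev = 0, next = log.next;

	do {
		for (i = prev; i < next; i++)
			consume(&log.entry[i]);		/* copy out entry i */
		memset(&log.entry[prev], 0,
		       (next - prev) * sizeof(log.entry[0]));
		prev = next;
		/* Reset to 0 only if nothing new arrived; otherwise
		 * cmpxchg() hands back the new index for another pass. */
		next = cmpxchg(&log.next, prev, 0);
	} while (next != prev);
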
@@ -680,20 +830,6 @@ static struct miscdevice mce_log_device = {
680 &mce_chrdev_ops, 830 &mce_chrdev_ops,
681}; 831};
682 832
683static unsigned long old_cr4 __initdata;
684
685void __init stop_mce(void)
686{
687 old_cr4 = read_cr4();
688 clear_in_cr4(X86_CR4_MCE);
689}
690
691void __init restart_mce(void)
692{
693 if (old_cr4 & X86_CR4_MCE)
694 set_in_cr4(X86_CR4_MCE);
695}
696
697/* 833/*
698 * Old style boot options parsing. Only for compatibility. 834 * Old style boot options parsing. Only for compatibility.
699 */ 835 */
@@ -703,8 +839,7 @@ static int __init mcheck_disable(char *str)
703 return 1; 839 return 1;
704} 840}
705 841
706/* mce=off disables machine check. Note you can re-enable it later 842/* mce=off disables machine check.
707 using sysfs.
708 mce=TOLERANCELEVEL (number, see above) 843 mce=TOLERANCELEVEL (number, see above)
709 mce=bootlog Log MCEs from before booting. Disabled by default on AMD. 844 mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
710 mce=nobootlog Don't log MCEs from before booting. */ 845 mce=nobootlog Don't log MCEs from before booting. */
@@ -728,6 +863,29 @@ __setup("mce=", mcheck_enable);
728 * Sysfs support 863 * Sysfs support
729 */ 864 */
730 865
866/*
867 * Disable machine checks on suspend and shutdown. We can't really handle
868 * them later.
869 */
870static int mce_disable(void)
871{
872 int i;
873
874 for (i = 0; i < banks; i++)
875 wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
876 return 0;
877}
878
879static int mce_suspend(struct sys_device *dev, pm_message_t state)
880{
881 return mce_disable();
882}
883
884static int mce_shutdown(struct sys_device *dev)
885{
886 return mce_disable();
887}
888
731/* On resume clear all MCE state. Don't want to see leftovers from the BIOS. 889/* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 732   Only one CPU is active at this time, the others get re-added later using 890   Only one CPU is active at this time, the others get re-added later using
733 CPU hotplug. */ 891 CPU hotplug. */
@@ -738,20 +896,24 @@ static int mce_resume(struct sys_device *dev)
738 return 0; 896 return 0;
739} 897}
740 898
899static void mce_cpu_restart(void *data)
900{
901 del_timer_sync(&__get_cpu_var(mce_timer));
902 if (mce_available(&current_cpu_data))
903 mce_init(NULL);
904 mce_init_timer();
905}
906
741/* Reinit MCEs after user configuration changes */ 907/* Reinit MCEs after user configuration changes */
742static void mce_restart(void) 908static void mce_restart(void)
743{ 909{
744 if (next_interval)
745 cancel_delayed_work(&mcheck_work);
746 /* Timer race is harmless here */
747 on_each_cpu(mce_init, NULL, 1);
748 next_interval = check_interval * HZ; 910 next_interval = check_interval * HZ;
749 if (next_interval) 911 on_each_cpu(mce_cpu_restart, NULL, 1);
750 schedule_delayed_work(&mcheck_work,
751 round_jiffies_relative(next_interval));
752} 912}
753 913
754static struct sysdev_class mce_sysclass = { 914static struct sysdev_class mce_sysclass = {
915 .suspend = mce_suspend,
916 .shutdown = mce_shutdown,
755 .resume = mce_resume, 917 .resume = mce_resume,
756 .name = "machinecheck", 918 .name = "machinecheck",
757}; 919};
@@ -778,16 +940,26 @@ void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinit
778 } \ 940 } \
779 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); 941 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
780 942
781/* 943static struct sysdev_attribute *bank_attrs;
782 * TBD should generate these dynamically based on number of available banks. 944
 783 * Have only 6 control banks in /sysfs until then.
784 */ 946 char *buf)
785ACCESSOR(bank0ctl,bank[0],mce_restart()) 947{
786ACCESSOR(bank1ctl,bank[1],mce_restart()) 948 u64 b = bank[attr - bank_attrs];
787ACCESSOR(bank2ctl,bank[2],mce_restart()) 949 return sprintf(buf, "%llx\n", b);
788ACCESSOR(bank3ctl,bank[3],mce_restart()) 950}
789ACCESSOR(bank4ctl,bank[4],mce_restart()) 951
790ACCESSOR(bank5ctl,bank[5],mce_restart()) 952static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
953 const char *buf, size_t siz)
954{
955 char *end;
956 u64 new = simple_strtoull(buf, &end, 0);
957 if (end == buf)
958 return -EINVAL;
959 bank[attr - bank_attrs] = new;
960 mce_restart();
961 return end-buf;
962}
791 963
792static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr, 964static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
793 char *buf) 965 char *buf)
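
show_bank()/set_bank() above replace the six hand-written bankNctl accessors: because every bank attribute lives in the single bank_attrs array, the expression attr - bank_attrs recovers the bank index from the attribute pointer alone. The trick in isolation, with illustrative names:

	struct item { long v; };
	static struct item *items;	/* one contiguous kzalloc'd array */

	static size_t item_index(const struct item *it)
	{
		return it - items;	/* counts elements, not bytes */
	}
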
@@ -814,8 +986,6 @@ static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
814static SYSDEV_INT_ATTR(tolerant, 0644, tolerant); 986static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
815ACCESSOR(check_interval,check_interval,mce_restart()) 987ACCESSOR(check_interval,check_interval,mce_restart())
816static struct sysdev_attribute *mce_attributes[] = { 988static struct sysdev_attribute *mce_attributes[] = {
817 &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
818 &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
819 &attr_tolerant.attr, &attr_check_interval, &attr_trigger, 989 &attr_tolerant.attr, &attr_check_interval, &attr_trigger,
820 NULL 990 NULL
821}; 991};
@@ -845,11 +1015,22 @@ static __cpuinit int mce_create_device(unsigned int cpu)
845 if (err) 1015 if (err)
846 goto error; 1016 goto error;
847 } 1017 }
1018 for (i = 0; i < banks; i++) {
1019 err = sysdev_create_file(&per_cpu(device_mce, cpu),
1020 &bank_attrs[i]);
1021 if (err)
1022 goto error2;
1023 }
848 cpu_set(cpu, mce_device_initialized); 1024 cpu_set(cpu, mce_device_initialized);
849 1025
850 return 0; 1026 return 0;
1027error2:
1028 while (--i >= 0) {
1029 sysdev_remove_file(&per_cpu(device_mce, cpu),
1030 &bank_attrs[i]);
1031 }
851error: 1032error:
852 while (i--) { 1033 while (--i >= 0) {
853 sysdev_remove_file(&per_cpu(device_mce,cpu), 1034 sysdev_remove_file(&per_cpu(device_mce,cpu),
854 mce_attributes[i]); 1035 mce_attributes[i]);
855 } 1036 }
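
The error handling above gains a second unwind stage: if a bank file fails to register, the bank files created so far are removed under error2 before control falls through to error for the static attributes. A generic sketch of staged unwind with goto (create_one/destroy_one are made-up helpers); note that a fall-through scheme has to leave the loop index correct for the earlier stage, reset explicitly here:

	for (i = 0; i < n1; i++)
		if (create_one(stage1, i))
			goto err1;
	for (i = 0; i < n2; i++)
		if (create_one(stage2, i))
			goto err2;
	return 0;
err2:
	while (--i >= 0)
		destroy_one(stage2, i);	/* undo the partial second stage */
	i = n1;				/* first stage completed fully */
err1:
	while (--i >= 0)
		destroy_one(stage1, i);
	return -EIO;
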
@@ -868,15 +1049,46 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
868 for (i = 0; mce_attributes[i]; i++) 1049 for (i = 0; mce_attributes[i]; i++)
869 sysdev_remove_file(&per_cpu(device_mce,cpu), 1050 sysdev_remove_file(&per_cpu(device_mce,cpu),
870 mce_attributes[i]); 1051 mce_attributes[i]);
1052 for (i = 0; i < banks; i++)
1053 sysdev_remove_file(&per_cpu(device_mce, cpu),
1054 &bank_attrs[i]);
871 sysdev_unregister(&per_cpu(device_mce,cpu)); 1055 sysdev_unregister(&per_cpu(device_mce,cpu));
872 cpu_clear(cpu, mce_device_initialized); 1056 cpu_clear(cpu, mce_device_initialized);
873} 1057}
874 1058
1059/* Make sure there are no machine checks on offlined CPUs. */
1060static void mce_disable_cpu(void *h)
1061{
1062 int i;
1063 unsigned long action = *(unsigned long *)h;
1064
1065 if (!mce_available(&current_cpu_data))
1066 return;
1067 if (!(action & CPU_TASKS_FROZEN))
1068 cmci_clear();
1069 for (i = 0; i < banks; i++)
1070 wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
1071}
1072
1073static void mce_reenable_cpu(void *h)
1074{
1075 int i;
1076 unsigned long action = *(unsigned long *)h;
1077
1078 if (!mce_available(&current_cpu_data))
1079 return;
1080 if (!(action & CPU_TASKS_FROZEN))
1081 cmci_reenable();
1082 for (i = 0; i < banks; i++)
1083 wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]);
1084}
1085
875/* Get notified when a cpu comes on/off. Be hotplug friendly. */ 1086/* Get notified when a cpu comes on/off. Be hotplug friendly. */
876static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, 1087static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
877 unsigned long action, void *hcpu) 1088 unsigned long action, void *hcpu)
878{ 1089{
879 unsigned int cpu = (unsigned long)hcpu; 1090 unsigned int cpu = (unsigned long)hcpu;
1091 struct timer_list *t = &per_cpu(mce_timer, cpu);
880 1092
881 switch (action) { 1093 switch (action) {
882 case CPU_ONLINE: 1094 case CPU_ONLINE:
@@ -891,6 +1103,21 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
891 threshold_cpu_callback(action, cpu); 1103 threshold_cpu_callback(action, cpu);
892 mce_remove_device(cpu); 1104 mce_remove_device(cpu);
893 break; 1105 break;
1106 case CPU_DOWN_PREPARE:
1107 case CPU_DOWN_PREPARE_FROZEN:
1108 del_timer_sync(t);
1109 smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
1110 break;
1111 case CPU_DOWN_FAILED:
1112 case CPU_DOWN_FAILED_FROZEN:
1113 t->expires = round_jiffies(jiffies + next_interval);
1114 add_timer_on(t, cpu);
1115 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
1116 break;
1117 case CPU_POST_DEAD:
1118 /* intentionally ignoring frozen here */
1119 cmci_rediscover(cpu);
1120 break;
894 } 1121 }
895 return NOTIFY_OK; 1122 return NOTIFY_OK;
896} 1123}
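
mce_disable_cpu() and mce_reenable_cpu() must execute on the CPU that is going down or coming back, since bank control MSRs are reachable only from their own CPU; the notifier therefore runs them via smp_call_function_single() rather than calling them directly. A minimal usage sketch of that primitive:

	#include <linux/smp.h>

	static void on_target(void *info)
	{
		/* executes on the chosen CPU, in interrupt-like context */
	}

	static void poke(int cpu)
	{
		/* final argument 1: wait until on_target() completes */
		smp_call_function_single(cpu, on_target, NULL, 1);
	}
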
@@ -899,6 +1126,34 @@ static struct notifier_block mce_cpu_notifier __cpuinitdata = {
899 .notifier_call = mce_cpu_callback, 1126 .notifier_call = mce_cpu_callback,
900}; 1127};
901 1128
1129static __init int mce_init_banks(void)
1130{
1131 int i;
1132
1133 bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks,
1134 GFP_KERNEL);
1135 if (!bank_attrs)
1136 return -ENOMEM;
1137
1138 for (i = 0; i < banks; i++) {
1139 struct sysdev_attribute *a = &bank_attrs[i];
1140 a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i);
1141 if (!a->attr.name)
1142 goto nomem;
1143 a->attr.mode = 0644;
1144 a->show = show_bank;
1145 a->store = set_bank;
1146 }
1147 return 0;
1148
1149nomem:
1150 while (--i >= 0)
1151 kfree(bank_attrs[i].attr.name);
1152 kfree(bank_attrs);
1153 bank_attrs = NULL;
1154 return -ENOMEM;
1155}
1156
902static __init int mce_init_device(void) 1157static __init int mce_init_device(void)
903{ 1158{
904 int err; 1159 int err;
@@ -906,6 +1161,11 @@ static __init int mce_init_device(void)
906 1161
907 if (!mce_available(&boot_cpu_data)) 1162 if (!mce_available(&boot_cpu_data))
908 return -EIO; 1163 return -EIO;
1164
1165 err = mce_init_banks();
1166 if (err)
1167 return err;
1168
909 err = sysdev_class_register(&mce_sysclass); 1169 err = sysdev_class_register(&mce_sysclass);
910 if (err) 1170 if (err)
911 return err; 1171 return err;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index f2ee0ae29bd..c5a32f92d07 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
67struct threshold_bank { 67struct threshold_bank {
68 struct kobject *kobj; 68 struct kobject *kobj;
69 struct threshold_block *blocks; 69 struct threshold_block *blocks;
70 cpumask_t cpus; 70 cpumask_var_t cpus;
71}; 71};
72static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); 72static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
73 73
@@ -79,6 +79,8 @@ static unsigned char shared_bank[NR_BANKS] = {
79 79
80static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ 80static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
81 81
82static void amd_threshold_interrupt(void);
83
82/* 84/*
83 * CPU Initialization 85 * CPU Initialization
84 */ 86 */
@@ -174,6 +176,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
174 tr.reset = 0; 176 tr.reset = 0;
175 tr.old_limit = 0; 177 tr.old_limit = 0;
176 threshold_restart_bank(&tr); 178 threshold_restart_bank(&tr);
179
180 mce_threshold_vector = amd_threshold_interrupt;
177 } 181 }
178 } 182 }
179} 183}
@@ -187,19 +191,13 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
187 * the interrupt goes off when error_count reaches threshold_limit. 191 * the interrupt goes off when error_count reaches threshold_limit.
188 * the handler will simply log mcelog w/ software defined bank number. 192 * the handler will simply log mcelog w/ software defined bank number.
189 */ 193 */
190asmlinkage void mce_threshold_interrupt(void) 194static void amd_threshold_interrupt(void)
191{ 195{
192 unsigned int bank, block; 196 unsigned int bank, block;
193 struct mce m; 197 struct mce m;
194 u32 low = 0, high = 0, address = 0; 198 u32 low = 0, high = 0, address = 0;
195 199
196 ack_APIC_irq(); 200 mce_setup(&m);
197 exit_idle();
198 irq_enter();
199
200 memset(&m, 0, sizeof(m));
201 rdtscll(m.tsc);
202 m.cpu = smp_processor_id();
203 201
204 /* assume first bank caused it */ 202 /* assume first bank caused it */
205 for (bank = 0; bank < NR_BANKS; ++bank) { 203 for (bank = 0; bank < NR_BANKS; ++bank) {
@@ -233,7 +231,8 @@ asmlinkage void mce_threshold_interrupt(void)
233 231
234 /* Log the machine check that caused the threshold 232 /* Log the machine check that caused the threshold
235 event. */ 233 event. */
236 do_machine_check(NULL, 0); 234 machine_check_poll(MCP_TIMESTAMP,
235 &__get_cpu_var(mce_poll_banks));
237 236
238 if (high & MASK_OVERFLOW_HI) { 237 if (high & MASK_OVERFLOW_HI) {
239 rdmsrl(address, m.misc); 238 rdmsrl(address, m.misc);
@@ -243,13 +242,10 @@ asmlinkage void mce_threshold_interrupt(void)
243 + bank * NR_BLOCKS 242 + bank * NR_BLOCKS
244 + block; 243 + block;
245 mce_log(&m); 244 mce_log(&m);
246 goto out; 245 return;
247 } 246 }
248 } 247 }
249 } 248 }
250out:
251 inc_irq_stat(irq_threshold_count);
252 irq_exit();
253} 249}
254 250
255/* 251/*
@@ -481,7 +477,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
481 477
482#ifdef CONFIG_SMP 478#ifdef CONFIG_SMP
483 if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ 479 if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
484 i = first_cpu(per_cpu(cpu_core_map, cpu)); 480 i = cpumask_first(&per_cpu(cpu_core_map, cpu));
485 481
486 /* first core not up yet */ 482 /* first core not up yet */
487 if (cpu_data(i).cpu_core_id) 483 if (cpu_data(i).cpu_core_id)
@@ -501,7 +497,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
501 if (err) 497 if (err)
502 goto out; 498 goto out;
503 499
504 b->cpus = per_cpu(cpu_core_map, cpu); 500 cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
505 per_cpu(threshold_banks, cpu)[bank] = b; 501 per_cpu(threshold_banks, cpu)[bank] = b;
506 goto out; 502 goto out;
507 } 503 }
@@ -512,15 +508,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
512 err = -ENOMEM; 508 err = -ENOMEM;
513 goto out; 509 goto out;
514 } 510 }
511 if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
512 kfree(b);
513 err = -ENOMEM;
514 goto out;
515 }
515 516
516 b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj); 517 b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
517 if (!b->kobj) 518 if (!b->kobj)
518 goto out_free; 519 goto out_free;
519 520
520#ifndef CONFIG_SMP 521#ifndef CONFIG_SMP
521 b->cpus = CPU_MASK_ALL; 522 cpumask_setall(b->cpus);
522#else 523#else
523 b->cpus = per_cpu(cpu_core_map, cpu); 524 cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
524#endif 525#endif
525 526
526 per_cpu(threshold_banks, cpu)[bank] = b; 527 per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
529 if (err) 530 if (err)
530 goto out_free; 531 goto out_free;
531 532
532 for_each_cpu_mask_nr(i, b->cpus) { 533 for_each_cpu(i, b->cpus) {
533 if (i == cpu) 534 if (i == cpu)
534 continue; 535 continue;
535 536
@@ -545,6 +546,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
545 546
546out_free: 547out_free:
547 per_cpu(threshold_banks, cpu)[bank] = NULL; 548 per_cpu(threshold_banks, cpu)[bank] = NULL;
549 free_cpumask_var(b->cpus);
548 kfree(b); 550 kfree(b);
549out: 551out:
550 return err; 552 return err;
@@ -619,7 +621,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
619#endif 621#endif
620 622
621 /* remove all sibling symlinks before unregistering */ 623 /* remove all sibling symlinks before unregistering */
622 for_each_cpu_mask_nr(i, b->cpus) { 624 for_each_cpu(i, b->cpus) {
623 if (i == cpu) 625 if (i == cpu)
624 continue; 626 continue;
625 627
@@ -632,6 +634,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
632free_out: 634free_out:
633 kobject_del(b->kobj); 635 kobject_del(b->kobj);
634 kobject_put(b->kobj); 636 kobject_put(b->kobj);
637 free_cpumask_var(b->cpus);
635 kfree(b); 638 kfree(b);
636 per_cpu(threshold_banks, cpu)[bank] = NULL; 639 per_cpu(threshold_banks, cpu)[bank] = NULL;
637} 640}
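
The threshold-bank conversion from an embedded cpumask_t to cpumask_var_t drops a NR_CPUS-sized bitmap from struct threshold_bank; with CONFIG_CPUMASK_OFFSTACK the variable becomes a pointer that must be allocated and freed explicitly. The usual lifecycle, as a sketch fragment:

	#include <linux/cpumask.h>
	#include <linux/slab.h>

	cpumask_var_t mask;
	int cpu;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;		/* real allocation only if OFFSTACK */
	cpumask_copy(mask, cpu_online_mask);
	for_each_cpu(cpu, mask)
		;			/* per-CPU work here */
	free_cpumask_var(mask);
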
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index f44c3662436..aaa7d973093 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -1,17 +1,21 @@
1/* 1/*
2 * Intel specific MCE features. 2 * Intel specific MCE features.
3 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca> 3 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
4 * Copyright (C) 2008, 2009 Intel Corporation
5 * Author: Andi Kleen
4 */ 6 */
5 7
6#include <linux/init.h> 8#include <linux/init.h>
7#include <linux/interrupt.h> 9#include <linux/interrupt.h>
8#include <linux/percpu.h> 10#include <linux/percpu.h>
9#include <asm/processor.h> 11#include <asm/processor.h>
12#include <asm/apic.h>
10#include <asm/msr.h> 13#include <asm/msr.h>
11#include <asm/mce.h> 14#include <asm/mce.h>
12#include <asm/hw_irq.h> 15#include <asm/hw_irq.h>
13#include <asm/idle.h> 16#include <asm/idle.h>
14#include <asm/therm_throt.h> 17#include <asm/therm_throt.h>
18#include <asm/apic.h>
15 19
16asmlinkage void smp_thermal_interrupt(void) 20asmlinkage void smp_thermal_interrupt(void)
17{ 21{
@@ -24,7 +28,7 @@ asmlinkage void smp_thermal_interrupt(void)
24 28
25 rdmsrl(MSR_IA32_THERM_STATUS, msr_val); 29 rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
26 if (therm_throt_process(msr_val & 1)) 30 if (therm_throt_process(msr_val & 1))
27 mce_log_therm_throt_event(smp_processor_id(), msr_val); 31 mce_log_therm_throt_event(msr_val);
28 32
29 inc_irq_stat(irq_thermal_count); 33 inc_irq_stat(irq_thermal_count);
30 irq_exit(); 34 irq_exit();
@@ -48,13 +52,13 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
48 */ 52 */
49 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 53 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
50 h = apic_read(APIC_LVTTHMR); 54 h = apic_read(APIC_LVTTHMR);
51 if ((l & (1 << 3)) && (h & APIC_DM_SMI)) { 55 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
52 printk(KERN_DEBUG 56 printk(KERN_DEBUG
53 "CPU%d: Thermal monitoring handled by SMI\n", cpu); 57 "CPU%d: Thermal monitoring handled by SMI\n", cpu);
54 return; 58 return;
55 } 59 }
56 60
57 if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13))) 61 if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
58 tm2 = 1; 62 tm2 = 1;
59 63
60 if (h & APIC_VECTOR_MASK) { 64 if (h & APIC_VECTOR_MASK) {
@@ -72,7 +76,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
72 wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); 76 wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
73 77
74 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 78 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
75 wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h); 79 wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
76 80
77 l = apic_read(APIC_LVTTHMR); 81 l = apic_read(APIC_LVTTHMR);
78 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); 82 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
@@ -84,7 +88,209 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
84 return; 88 return;
85} 89}
86 90
91/*
92 * Support for Intel Correct Machine Check Interrupts. This allows
93 * the CPU to raise an interrupt when a corrected machine check happened.
94 * Normally we pick those up using a regular polling timer.
95 * Also supports reliable discovery of shared banks.
96 */
97
98static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
99
100/*
101 * cmci_discover_lock protects against parallel discovery attempts
102 * which could race against each other.
103 */
104static DEFINE_SPINLOCK(cmci_discover_lock);
105
106#define CMCI_THRESHOLD 1
107
108static int cmci_supported(int *banks)
109{
110 u64 cap;
111
112 /*
 113 * Vendor check is not strictly needed, but the initialization
 114 * is vendor keyed and this makes sure none of
 115 * the backdoors are entered otherwise.
116 */
117 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
118 return 0;
119 if (!cpu_has_apic || lapic_get_maxlvt() < 6)
120 return 0;
121 rdmsrl(MSR_IA32_MCG_CAP, cap);
122 *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
123 return !!(cap & MCG_CMCI_P);
124}
125
126/*
127 * The interrupt handler. This is called on every event.
128 * Just call the poller directly to log any events.
129 * This could in theory increase the threshold under high load,
130 * but doesn't for now.
131 */
132static void intel_threshold_interrupt(void)
133{
134 machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
135 mce_notify_user();
136}
137
138static void print_update(char *type, int *hdr, int num)
139{
140 if (*hdr == 0)
141 printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
142 *hdr = 1;
143 printk(KERN_CONT " %s:%d", type, num);
144}
145
146/*
147 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
148 * on this CPU. Use the algorithm recommended in the SDM to discover shared
149 * banks.
150 */
151static void cmci_discover(int banks, int boot)
152{
153 unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
154 int hdr = 0;
155 int i;
156
157 spin_lock(&cmci_discover_lock);
158 for (i = 0; i < banks; i++) {
159 u64 val;
160
161 if (test_bit(i, owned))
162 continue;
163
164 rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
165
166 /* Already owned by someone else? */
167 if (val & CMCI_EN) {
168 if (test_and_clear_bit(i, owned) || boot)
169 print_update("SHD", &hdr, i);
170 __clear_bit(i, __get_cpu_var(mce_poll_banks));
171 continue;
172 }
173
174 val |= CMCI_EN | CMCI_THRESHOLD;
175 wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
176 rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
177
178 /* Did the enable bit stick? -- the bank supports CMCI */
179 if (val & CMCI_EN) {
180 if (!test_and_set_bit(i, owned) || boot)
181 print_update("CMCI", &hdr, i);
182 __clear_bit(i, __get_cpu_var(mce_poll_banks));
183 } else {
184 WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
185 }
186 }
187 spin_unlock(&cmci_discover_lock);
188 if (hdr)
189 printk(KERN_CONT "\n");
190}
191
192/*
193 * Just in case we missed an event during initialization check
194 * all the CMCI owned banks.
195 */
196void cmci_recheck(void)
197{
198 unsigned long flags;
199 int banks;
200
201 if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
202 return;
203 local_irq_save(flags);
204 machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
205 local_irq_restore(flags);
206}
207
208/*
209 * Disable CMCI on this CPU for all banks it owns when it goes down.
210 * This allows other CPUs to claim the banks on rediscovery.
211 */
212void cmci_clear(void)
213{
214 int i;
215 int banks;
216 u64 val;
217
218 if (!cmci_supported(&banks))
219 return;
220 spin_lock(&cmci_discover_lock);
221 for (i = 0; i < banks; i++) {
222 if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
223 continue;
224 /* Disable CMCI */
225 rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
226 val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
227 wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
228 __clear_bit(i, __get_cpu_var(mce_banks_owned));
229 }
230 spin_unlock(&cmci_discover_lock);
231}
232
233/*
 234 * After a CPU went down, cycle through all the others and rediscover.
235 * Must run in process context.
236 */
237void cmci_rediscover(int dying)
238{
239 int banks;
240 int cpu;
241 cpumask_var_t old;
242
243 if (!cmci_supported(&banks))
244 return;
245 if (!alloc_cpumask_var(&old, GFP_KERNEL))
246 return;
247 cpumask_copy(old, &current->cpus_allowed);
248
249 for_each_online_cpu (cpu) {
250 if (cpu == dying)
251 continue;
252 if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
253 continue;
254 /* Recheck banks in case CPUs don't all have the same */
255 if (cmci_supported(&banks))
256 cmci_discover(banks, 0);
257 }
258
259 set_cpus_allowed_ptr(current, old);
260 free_cpumask_var(old);
261}
262
263/*
264 * Reenable CMCI on this CPU in case a CPU down failed.
265 */
266void cmci_reenable(void)
267{
268 int banks;
269 if (cmci_supported(&banks))
270 cmci_discover(banks, 0);
271}
272
273static __cpuinit void intel_init_cmci(void)
274{
275 int banks;
276
277 if (!cmci_supported(&banks))
278 return;
279
280 mce_threshold_vector = intel_threshold_interrupt;
281 cmci_discover(banks, 1);
282 /*
 283 * For CPU #0 this runs while the APIC is still disabled, but that's
 284 * OK because only the vector is set up. We still do another
 285 * check of the banks later for CPU #0 just to make sure
 286 * not to miss any events.
287 */
288 apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
289 cmci_recheck();
290}
291
87void mce_intel_feature_init(struct cpuinfo_x86 *c) 292void mce_intel_feature_init(struct cpuinfo_x86 *c)
88{ 293{
89 intel_init_thermal(c); 294 intel_init_thermal(c);
295 intel_init_cmci();
90} 296}
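
cmci_discover() claims banks with a write-then-readback probe on MCi_CTL2: if CMCI_EN is already set, another CPU owns the bank; if a freshly written CMCI_EN reads back set, this CPU now owns it (and stops polling it); if the bit reads back clear, the bank simply lacks CMCI. A reduced sketch of the probe for one bank i; the OWNED/SHARED/UNSUPPORTED labels are made up, the MSR names are the patch's own:

	u64 val;

	rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
	if (val & CMCI_EN)
		return SHARED;			/* claimed by another CPU */

	wrmsrl(MSR_IA32_MC0_CTL2 + i, val | CMCI_EN | CMCI_THRESHOLD);
	rdmsrl(MSR_IA32_MC0_CTL2 + i, val);

	return (val & CMCI_EN) ? OWNED		/* the bit stuck */
			       : UNSUPPORTED;	/* hardware ignored it */
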
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index 9b60fce09f7..f53bdcbaf38 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -85,7 +85,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
85 */ 85 */
86 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 86 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
87 h = apic_read(APIC_LVTTHMR); 87 h = apic_read(APIC_LVTTHMR);
88 if ((l & (1<<3)) && (h & APIC_DM_SMI)) { 88 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
89 printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", 89 printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
90 cpu); 90 cpu);
91 return; /* -EBUSY */ 91 return; /* -EBUSY */
@@ -111,7 +111,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
111 vendor_thermal_interrupt = intel_thermal_interrupt; 111 vendor_thermal_interrupt = intel_thermal_interrupt;
112 112
113 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 113 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
114 wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h); 114 wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
115 115
116 l = apic_read(APIC_LVTTHMR); 116 l = apic_read(APIC_LVTTHMR);
117 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); 117 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
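
Both thermal-init paths now test MSR_IA32_MISC_ENABLE_TM1/TM2 rather than raw shifts; the literals they replace pin the values down: bit 3 for TM1 and bit 13 for TM2. Open-coded, the constants would read as below (the integer type in the real header may differ):

	#define MSR_IA32_MISC_ENABLE_TM1	(1 << 3)	/* was (1<<3) */
	#define MSR_IA32_MISC_ENABLE_TM2	(1 << 13)	/* was (1<<13) */
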
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
new file mode 100644
index 00000000000..23ee9e730f7
--- /dev/null
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -0,0 +1,29 @@
1/*
2 * Common corrected MCE threshold handler code:
3 */
4#include <linux/interrupt.h>
5#include <linux/kernel.h>
6
7#include <asm/irq_vectors.h>
8#include <asm/apic.h>
9#include <asm/idle.h>
10#include <asm/mce.h>
11
12static void default_threshold_interrupt(void)
13{
14 printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n",
15 THRESHOLD_APIC_VECTOR);
16}
17
18void (*mce_threshold_vector)(void) = default_threshold_interrupt;
19
20asmlinkage void mce_threshold_interrupt(void)
21{
22 exit_idle();
23 irq_enter();
24 inc_irq_stat(irq_threshold_count);
25 mce_threshold_vector();
26 irq_exit();
27 /* Ack only at the end to avoid potential reentry */
28 ack_APIC_irq();
29}
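
The new file reduces the threshold interrupt to a dispatcher: the asmlinkage entry point performs the idle-exit, irq accounting, and late ack exactly once, and vendor code installs its handler with a single assignment during feature init, as both vendor files in this patch do:

	mce_threshold_vector = amd_threshold_interrupt;		/* AMD */
	mce_threshold_vector = intel_threshold_interrupt;	/* Intel CMCI */
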
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 9abd48b2267..f6c70a164e3 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -19,7 +19,7 @@
19#include <linux/nmi.h> 19#include <linux/nmi.h>
20#include <linux/kprobes.h> 20#include <linux/kprobes.h>
21 21
22#include <asm/apic.h> 22#include <asm/genapic.h>
23#include <asm/intel_arch_perfmon.h> 23#include <asm/intel_arch_perfmon.h>
24 24
25struct nmi_watchdog_ctlblk { 25struct nmi_watchdog_ctlblk {
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 01b1244ef1c..d67e0e48bc2 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -7,11 +7,10 @@
7/* 7/*
8 * Get CPU information for use by the procfs. 8 * Get CPU information for use by the procfs.
9 */ 9 */
10#ifdef CONFIG_X86_32
11static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, 10static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
12 unsigned int cpu) 11 unsigned int cpu)
13{ 12{
14#ifdef CONFIG_X86_HT 13#ifdef CONFIG_SMP
15 if (c->x86_max_cores * smp_num_siblings > 1) { 14 if (c->x86_max_cores * smp_num_siblings > 1) {
16 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); 15 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
17 seq_printf(m, "siblings\t: %d\n", 16 seq_printf(m, "siblings\t: %d\n",
@@ -24,6 +23,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
24#endif 23#endif
25} 24}
26 25
26#ifdef CONFIG_X86_32
27static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) 27static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
28{ 28{
29 /* 29 /*
@@ -50,22 +50,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
50 c->wp_works_ok ? "yes" : "no"); 50 c->wp_works_ok ? "yes" : "no");
51} 51}
52#else 52#else
53static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
54 unsigned int cpu)
55{
56#ifdef CONFIG_SMP
57 if (c->x86_max_cores * smp_num_siblings > 1) {
58 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
59 seq_printf(m, "siblings\t: %d\n",
60 cpus_weight(per_cpu(cpu_core_map, cpu)));
61 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
62 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
63 seq_printf(m, "apicid\t\t: %d\n", c->apicid);
64 seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
65 }
66#endif
67}
68
69static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) 53static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
70{ 54{
71 seq_printf(m, 55 seq_printf(m,
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 52b3fefbd5a..bb62b3e5caa 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
98#endif 98#endif
99} 99}
100 100
101static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { 101static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
102 .c_vendor = "Transmeta", 102 .c_vendor = "Transmeta",
103 .c_ident = { "GenuineTMx86", "TransmetaCPU" }, 103 .c_ident = { "GenuineTMx86", "TransmetaCPU" },
104 .c_early_init = early_init_transmeta, 104 .c_early_init = early_init_transmeta,
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index e777f79e096..fd2c37bf7ac 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -8,7 +8,7 @@
8 * so no special init takes place. 8 * so no special init takes place.
9 */ 9 */
10 10
11static struct cpu_dev umc_cpu_dev __cpuinitdata = { 11static const struct cpu_dev __cpuinitconst umc_cpu_dev = {
12 .c_vendor = "UMC", 12 .c_vendor = "UMC",
13 .c_ident = { "UMC UMC UMC" }, 13 .c_ident = { "UMC UMC UMC" },
14 .c_models = { 14 .c_models = {
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index c689d19e35a..ff958248e61 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -24,12 +24,10 @@
24#include <asm/apic.h> 24#include <asm/apic.h>
25#include <asm/hpet.h> 25#include <asm/hpet.h>
26#include <linux/kdebug.h> 26#include <linux/kdebug.h>
27#include <asm/smp.h> 27#include <asm/cpu.h>
28#include <asm/reboot.h> 28#include <asm/reboot.h>
29#include <asm/virtext.h> 29#include <asm/virtext.h>
30 30
31#include <mach_ipi.h>
32
33 31
34#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) 32#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
35 33
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 6b1f6f6f866..87d103ded1c 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -99,7 +99,7 @@ print_context_stack(struct thread_info *tinfo,
99 frame = frame->next_frame; 99 frame = frame->next_frame;
100 bp = (unsigned long) frame; 100 bp = (unsigned long) frame;
101 } else { 101 } else {
102 ops->address(data, addr, bp == 0); 102 ops->address(data, addr, 0);
103 } 103 }
104 print_ftrace_graph_addr(addr, data, ops, tinfo, graph); 104 print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
105 } 105 }
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index c302d070704..d35db5993fd 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -106,7 +106,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
106 const struct stacktrace_ops *ops, void *data) 106 const struct stacktrace_ops *ops, void *data)
107{ 107{
108 const unsigned cpu = get_cpu(); 108 const unsigned cpu = get_cpu();
109 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; 109 unsigned long *irq_stack_end =
110 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
110 unsigned used = 0; 111 unsigned used = 0;
111 struct thread_info *tinfo; 112 struct thread_info *tinfo;
112 int graph = 0; 113 int graph = 0;
@@ -160,23 +161,23 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
160 stack = (unsigned long *) estack_end[-2]; 161 stack = (unsigned long *) estack_end[-2];
161 continue; 162 continue;
162 } 163 }
163 if (irqstack_end) { 164 if (irq_stack_end) {
164 unsigned long *irqstack; 165 unsigned long *irq_stack;
165 irqstack = irqstack_end - 166 irq_stack = irq_stack_end -
166 (IRQSTACKSIZE - 64) / sizeof(*irqstack); 167 (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
167 168
168 if (stack >= irqstack && stack < irqstack_end) { 169 if (stack >= irq_stack && stack < irq_stack_end) {
169 if (ops->stack(data, "IRQ") < 0) 170 if (ops->stack(data, "IRQ") < 0)
170 break; 171 break;
171 bp = print_context_stack(tinfo, stack, bp, 172 bp = print_context_stack(tinfo, stack, bp,
172 ops, data, irqstack_end, &graph); 173 ops, data, irq_stack_end, &graph);
173 /* 174 /*
 174 * The last pointer (index -1) in the IRQ 175 * The last pointer (index -1) in the IRQ
 175 * stack links to the next stack (which 176 * stack links to the next stack (which
 176 * is normally the process stack): 177 * is normally the process stack):
177 */ 178 */
178 stack = (unsigned long *) (irqstack_end[-1]); 179 stack = (unsigned long *) (irq_stack_end[-1]);
179 irqstack_end = NULL; 180 irq_stack_end = NULL;
180 ops->stack(data, "EOI"); 181 ops->stack(data, "EOI");
181 continue; 182 continue;
182 } 183 }
@@ -199,10 +200,10 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
199 unsigned long *stack; 200 unsigned long *stack;
200 int i; 201 int i;
201 const int cpu = smp_processor_id(); 202 const int cpu = smp_processor_id();
202 unsigned long *irqstack_end = 203 unsigned long *irq_stack_end =
203 (unsigned long *) (cpu_pda(cpu)->irqstackptr); 204 (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
204 unsigned long *irqstack = 205 unsigned long *irq_stack =
205 (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); 206 (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
206 207
207 /* 208 /*
208 * debugging aid: "show_stack(NULL, NULL);" prints the 209 * debugging aid: "show_stack(NULL, NULL);" prints the
@@ -218,9 +219,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
218 219
219 stack = sp; 220 stack = sp;
220 for (i = 0; i < kstack_depth_to_print; i++) { 221 for (i = 0; i < kstack_depth_to_print; i++) {
221 if (stack >= irqstack && stack <= irqstack_end) { 222 if (stack >= irq_stack && stack <= irq_stack_end) {
222 if (stack == irqstack_end) { 223 if (stack == irq_stack_end) {
223 stack = (unsigned long *) (irqstack_end[-1]); 224 stack = (unsigned long *) (irq_stack_end[-1]);
224 printk(" <EOI> "); 225 printk(" <EOI> ");
225 } 226 }
226 } else { 227 } else {
@@ -241,7 +242,7 @@ void show_registers(struct pt_regs *regs)
241 int i; 242 int i;
242 unsigned long sp; 243 unsigned long sp;
243 const int cpu = smp_processor_id(); 244 const int cpu = smp_processor_id();
244 struct task_struct *cur = cpu_pda(cpu)->pcurrent; 245 struct task_struct *cur = current;
245 246
246 sp = regs->sp; 247 sp = regs->sp;
247 printk("CPU %d ", cpu); 248 printk("CPU %d ", cpu);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index e85826829cf..95b81c18b6b 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -110,19 +110,25 @@ int __init e820_all_mapped(u64 start, u64 end, unsigned type)
110/* 110/*
111 * Add a memory region to the kernel e820 map. 111 * Add a memory region to the kernel e820 map.
112 */ 112 */
113void __init e820_add_region(u64 start, u64 size, int type) 113static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
114 int type)
114{ 115{
115 int x = e820.nr_map; 116 int x = e820x->nr_map;
116 117
117 if (x == ARRAY_SIZE(e820.map)) { 118 if (x == ARRAY_SIZE(e820x->map)) {
118 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); 119 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
119 return; 120 return;
120 } 121 }
121 122
122 e820.map[x].addr = start; 123 e820x->map[x].addr = start;
123 e820.map[x].size = size; 124 e820x->map[x].size = size;
124 e820.map[x].type = type; 125 e820x->map[x].type = type;
125 e820.nr_map++; 126 e820x->nr_map++;
127}
128
129void __init e820_add_region(u64 start, u64 size, int type)
130{
131 __e820_add_region(&e820, start, size, type);
126} 132}
127 133
128void __init e820_print_map(char *who) 134void __init e820_print_map(char *who)
@@ -417,11 +423,11 @@ static int __init append_e820_map(struct e820entry *biosmap, int nr_map)
417 return __append_e820_map(biosmap, nr_map); 423 return __append_e820_map(biosmap, nr_map);
418} 424}
419 425
420static u64 __init e820_update_range_map(struct e820map *e820x, u64 start, 426static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
421 u64 size, unsigned old_type, 427 u64 size, unsigned old_type,
422 unsigned new_type) 428 unsigned new_type)
423{ 429{
424 int i; 430 unsigned int i;
425 u64 real_updated_size = 0; 431 u64 real_updated_size = 0;
426 432
427 BUG_ON(old_type == new_type); 433 BUG_ON(old_type == new_type);
@@ -429,7 +435,7 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
429 if (size > (ULLONG_MAX - start)) 435 if (size > (ULLONG_MAX - start))
430 size = ULLONG_MAX - start; 436 size = ULLONG_MAX - start;
431 437
432 for (i = 0; i < e820.nr_map; i++) { 438 for (i = 0; i < e820x->nr_map; i++) {
433 struct e820entry *ei = &e820x->map[i]; 439 struct e820entry *ei = &e820x->map[i];
434 u64 final_start, final_end; 440 u64 final_start, final_end;
435 if (ei->type != old_type) 441 if (ei->type != old_type)
@@ -446,10 +452,16 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
446 final_end = min(start + size, ei->addr + ei->size); 452 final_end = min(start + size, ei->addr + ei->size);
447 if (final_start >= final_end) 453 if (final_start >= final_end)
448 continue; 454 continue;
449 e820_add_region(final_start, final_end - final_start, 455
450 new_type); 456 __e820_add_region(e820x, final_start, final_end - final_start,
457 new_type);
458
451 real_updated_size += final_end - final_start; 459 real_updated_size += final_end - final_start;
452 460
461 /*
 462 * The remaining range could be the head or the tail,
 463 * so the size must be updated first.
464 */
453 ei->size -= final_end - final_start; 465 ei->size -= final_end - final_start;
454 if (ei->addr < final_start) 466 if (ei->addr < final_start)
455 continue; 467 continue;
@@ -461,13 +473,13 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
461u64 __init e820_update_range(u64 start, u64 size, unsigned old_type, 473u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
462 unsigned new_type) 474 unsigned new_type)
463{ 475{
464 return e820_update_range_map(&e820, start, size, old_type, new_type); 476 return __e820_update_range(&e820, start, size, old_type, new_type);
465} 477}
466 478
467static u64 __init e820_update_range_saved(u64 start, u64 size, 479static u64 __init e820_update_range_saved(u64 start, u64 size,
468 unsigned old_type, unsigned new_type) 480 unsigned old_type, unsigned new_type)
469{ 481{
470 return e820_update_range_map(&e820_saved, start, size, old_type, 482 return __e820_update_range(&e820_saved, start, size, old_type,
471 new_type); 483 new_type);
472} 484}
473 485
@@ -858,6 +870,9 @@ void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
858 */ 870 */
859void __init reserve_early(u64 start, u64 end, char *name) 871void __init reserve_early(u64 start, u64 end, char *name)
860{ 872{
873 if (start >= end)
874 return;
875
861 drop_overlaps_that_are_ok(start, end); 876 drop_overlaps_that_are_ok(start, end);
862 __reserve_early(start, end, name, 0); 877 __reserve_early(start, end, name, 0);
863} 878}
@@ -1017,8 +1032,8 @@ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
1017 continue; 1032 continue;
1018 return addr; 1033 return addr;
1019 } 1034 }
1020 return -1UL;
1021 1035
1036 return -1ULL;
1022} 1037}
1023 1038
1024/* 1039/*
@@ -1031,13 +1046,22 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
1031 u64 start; 1046 u64 start;
1032 1047
1033 start = startt; 1048 start = startt;
1034 while (size < sizet) 1049 while (size < sizet && (start + 1))
1035 start = find_e820_area_size(start, &size, align); 1050 start = find_e820_area_size(start, &size, align);
1036 1051
1037 if (size < sizet) 1052 if (size < sizet)
1038 return 0; 1053 return 0;
1039 1054
1055#ifdef CONFIG_X86_32
1056 if (start >= MAXMEM)
1057 return 0;
1058 if (start + size > MAXMEM)
1059 size = MAXMEM - start;
1060#endif
1061
1040 addr = round_down(start + size - sizet, align); 1062 addr = round_down(start + size - sizet, align);
1063 if (addr < start)
1064 return 0;
1041 e820_update_range(addr, sizet, E820_RAM, E820_RESERVED); 1065 e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
1042 e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED); 1066 e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
1043 printk(KERN_INFO "update e820 for early_reserve_e820\n"); 1067 printk(KERN_INFO "update e820 for early_reserve_e820\n");
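
early_reserve_e820() places the requested block at the top of the window it found and now guards the arithmetic: the search loop stops once start wraps (start + 1 evaluating to 0 corresponds to find_e820_area_size() returning -1ULL), 32-bit builds clamp the window to MAXMEM, and the rounded-down address is compared against start so that underflow cannot yield a block below the window. The core arithmetic as a sketch, with align assumed to be a power of two as round_down() requires:

	u64 addr = (start + size - sizet) & ~(align - 1);  /* round_down() */
	if (addr < start)
		return 0;	/* window too small, or subtraction wrapped */
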
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 504ad198e4a..335f049d110 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -13,8 +13,8 @@
13#include <asm/setup.h> 13#include <asm/setup.h>
14#include <xen/hvc-console.h> 14#include <xen/hvc-console.h>
15#include <asm/pci-direct.h> 15#include <asm/pci-direct.h>
16#include <asm/pgtable.h>
17#include <asm/fixmap.h> 16#include <asm/fixmap.h>
17#include <asm/pgtable.h>
18#include <linux/usb/ehci_def.h> 18#include <linux/usb/ehci_def.h>
19 19
20/* Simple VGA output */ 20/* Simple VGA output */
@@ -250,7 +250,7 @@ static int dbgp_wait_until_complete(void)
250 return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl); 250 return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
251} 251}
252 252
253static void dbgp_mdelay(int ms) 253static void __init dbgp_mdelay(int ms)
254{ 254{
255 int i; 255 int i;
256 256
@@ -311,7 +311,7 @@ static void dbgp_set_data(const void *buf, int size)
311 writel(hi, &ehci_debug->data47); 311 writel(hi, &ehci_debug->data47);
312} 312}
313 313
314static void dbgp_get_data(void *buf, int size) 314static void __init dbgp_get_data(void *buf, int size)
315{ 315{
316 unsigned char *bytes = buf; 316 unsigned char *bytes = buf;
317 u32 lo, hi; 317 u32 lo, hi;
@@ -355,7 +355,7 @@ static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
355 return ret; 355 return ret;
356} 356}
357 357
358static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data, 358static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
359 int size) 359 int size)
360{ 360{
361 u32 pids, addr, ctrl; 361 u32 pids, addr, ctrl;
@@ -386,8 +386,8 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
386 return ret; 386 return ret;
387} 387}
388 388
389static int dbgp_control_msg(unsigned devnum, int requesttype, int request, 389static int __init dbgp_control_msg(unsigned devnum, int requesttype,
390 int value, int index, void *data, int size) 390 int request, int value, int index, void *data, int size)
391{ 391{
392 u32 pids, addr, ctrl; 392 u32 pids, addr, ctrl;
393 struct usb_ctrlrequest req; 393 struct usb_ctrlrequest req;
@@ -489,7 +489,7 @@ static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
489 return 0; 489 return 0;
490} 490}
491 491
492static int ehci_reset_port(int port) 492static int __init ehci_reset_port(int port)
493{ 493{
494 u32 portsc; 494 u32 portsc;
495 u32 delay_time, delay; 495 u32 delay_time, delay;
@@ -532,7 +532,7 @@ static int ehci_reset_port(int port)
532 return -EBUSY; 532 return -EBUSY;
533} 533}
534 534
535static int ehci_wait_for_port(int port) 535static int __init ehci_wait_for_port(int port)
536{ 536{
537 u32 status; 537 u32 status;
538 int ret, reps; 538 int ret, reps;
@@ -557,13 +557,13 @@ static inline void dbgp_printk(const char *fmt, ...) { }
557 557
558typedef void (*set_debug_port_t)(int port); 558typedef void (*set_debug_port_t)(int port);
559 559
560static void default_set_debug_port(int port) 560static void __init default_set_debug_port(int port)
561{ 561{
562} 562}
563 563
564static set_debug_port_t set_debug_port = default_set_debug_port; 564static set_debug_port_t __initdata set_debug_port = default_set_debug_port;
565 565
566static void nvidia_set_debug_port(int port) 566static void __init nvidia_set_debug_port(int port)
567{ 567{
568 u32 dword; 568 u32 dword;
569 dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 569 dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
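
The early_printk changes are almost entirely section annotations: functions used only during boot become __init and the mutable function pointer becomes __initdata, so their memory is reclaimed once init completes and modpost can warn if non-init code references them. The general shape, as a sketch:

	#include <linux/init.h>

	static int __initdata probed;		/* data freed after boot */

	static int __init probe_hw(void)	/* text freed after boot */
	{
		return 1;
	}

	static int __init my_early_setup(void)
	{
		probed = probe_hw();		/* init-to-init call: fine */
		return 0;
	}
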
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index eb1ef3b67dd..1736acc4d7a 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -366,10 +366,12 @@ void __init efi_init(void)
366 SMBIOS_TABLE_GUID)) { 366 SMBIOS_TABLE_GUID)) {
367 efi.smbios = config_tables[i].table; 367 efi.smbios = config_tables[i].table;
368 printk(" SMBIOS=0x%lx ", config_tables[i].table); 368 printk(" SMBIOS=0x%lx ", config_tables[i].table);
369#ifdef CONFIG_X86_UV
369 } else if (!efi_guidcmp(config_tables[i].guid, 370 } else if (!efi_guidcmp(config_tables[i].guid,
370 UV_SYSTEM_TABLE_GUID)) { 371 UV_SYSTEM_TABLE_GUID)) {
371 efi.uv_systab = config_tables[i].table; 372 efi.uv_systab = config_tables[i].table;
372 printk(" UVsystab=0x%lx ", config_tables[i].table); 373 printk(" UVsystab=0x%lx ", config_tables[i].table);
374#endif
373 } else if (!efi_guidcmp(config_tables[i].guid, 375 } else if (!efi_guidcmp(config_tables[i].guid,
374 HCDP_TABLE_GUID)) { 376 HCDP_TABLE_GUID)) {
375 efi.hcdp = config_tables[i].table; 377 efi.hcdp = config_tables[i].table;
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index cb783b92c50..22c3b7828c5 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -36,6 +36,7 @@
36#include <asm/proto.h> 36#include <asm/proto.h>
37#include <asm/efi.h> 37#include <asm/efi.h>
38#include <asm/cacheflush.h> 38#include <asm/cacheflush.h>
39#include <asm/fixmap.h>
39 40
40static pgd_t save_pgd __initdata; 41static pgd_t save_pgd __initdata;
41static unsigned long efi_flags __initdata; 42static unsigned long efi_flags __initdata;
diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
index ef00bb77d7e..fbe66e626c0 100644
--- a/arch/x86/kernel/efi_stub_32.S
+++ b/arch/x86/kernel/efi_stub_32.S
@@ -6,7 +6,7 @@
6 */ 6 */
7 7
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/page.h> 9#include <asm/page_types.h>
10 10
11/* 11/*
12 * efi_call_phys(void *, ...) is a function with variable parameters. 12 * efi_call_phys(void *, ...) is a function with variable parameters.
@@ -113,6 +113,7 @@ ENTRY(efi_call_phys)
113 movl (%edx), %ecx 113 movl (%edx), %ecx
114 pushl %ecx 114 pushl %ecx
115 ret 115 ret
116ENDPROC(efi_call_phys)
116.previous 117.previous
117 118
118.data 119.data
diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
index 99b47d48c9f..4c07ccab814 100644
--- a/arch/x86/kernel/efi_stub_64.S
+++ b/arch/x86/kernel/efi_stub_64.S
@@ -41,6 +41,7 @@ ENTRY(efi_call0)
41 addq $32, %rsp 41 addq $32, %rsp
42 RESTORE_XMM 42 RESTORE_XMM
43 ret 43 ret
44ENDPROC(efi_call0)
44 45
45ENTRY(efi_call1) 46ENTRY(efi_call1)
46 SAVE_XMM 47 SAVE_XMM
@@ -50,6 +51,7 @@ ENTRY(efi_call1)
50 addq $32, %rsp 51 addq $32, %rsp
51 RESTORE_XMM 52 RESTORE_XMM
52 ret 53 ret
54ENDPROC(efi_call1)
53 55
54ENTRY(efi_call2) 56ENTRY(efi_call2)
55 SAVE_XMM 57 SAVE_XMM
@@ -59,6 +61,7 @@ ENTRY(efi_call2)
59 addq $32, %rsp 61 addq $32, %rsp
60 RESTORE_XMM 62 RESTORE_XMM
61 ret 63 ret
64ENDPROC(efi_call2)
62 65
63ENTRY(efi_call3) 66ENTRY(efi_call3)
64 SAVE_XMM 67 SAVE_XMM
@@ -69,6 +72,7 @@ ENTRY(efi_call3)
69 addq $32, %rsp 72 addq $32, %rsp
70 RESTORE_XMM 73 RESTORE_XMM
71 ret 74 ret
75ENDPROC(efi_call3)
72 76
73ENTRY(efi_call4) 77ENTRY(efi_call4)
74 SAVE_XMM 78 SAVE_XMM
@@ -80,6 +84,7 @@ ENTRY(efi_call4)
80 addq $32, %rsp 84 addq $32, %rsp
81 RESTORE_XMM 85 RESTORE_XMM
82 ret 86 ret
87ENDPROC(efi_call4)
83 88
84ENTRY(efi_call5) 89ENTRY(efi_call5)
85 SAVE_XMM 90 SAVE_XMM
@@ -92,6 +97,7 @@ ENTRY(efi_call5)
92 addq $48, %rsp 97 addq $48, %rsp
93 RESTORE_XMM 98 RESTORE_XMM
94 ret 99 ret
100ENDPROC(efi_call5)
95 101
96ENTRY(efi_call6) 102ENTRY(efi_call6)
97 SAVE_XMM 103 SAVE_XMM
@@ -107,3 +113,4 @@ ENTRY(efi_call6)
107 addq $48, %rsp 113 addq $48, %rsp
108 RESTORE_XMM 114 RESTORE_XMM
109 ret 115 ret
116ENDPROC(efi_call6)
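The ENDPROC markers added throughout this file give each assembly entry point a proper symbol type and size. My assumed expansion, based on how the macros are used here (the authoritative definitions live in include/linux/linkage.h):

	/* assumed expansions; verify against include/linux/linkage.h */
	#define END(name)	.size name, .-name
	#define ENDPROC(name)	.type name, @function; END(name)

With type and size recorded, objdump, kallsyms, and stack unwinders can attribute addresses inside these stubs to the right function.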
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 46469029e9d..c929add475c 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -30,12 +30,13 @@
30 * 1C(%esp) - %ds 30 * 1C(%esp) - %ds
31 * 20(%esp) - %es 31 * 20(%esp) - %es
32 * 24(%esp) - %fs 32 * 24(%esp) - %fs
33 * 28(%esp) - orig_eax 33 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
34 * 2C(%esp) - %eip 34 * 2C(%esp) - orig_eax
35 * 30(%esp) - %cs 35 * 30(%esp) - %eip
36 * 34(%esp) - %eflags 36 * 34(%esp) - %cs
37 * 38(%esp) - %oldesp 37 * 38(%esp) - %eflags
38 * 3C(%esp) - %oldss 38 * 3C(%esp) - %oldesp
39 * 40(%esp) - %oldss
39 * 40 *
40 * "current" is in register %ebx during any slow entries. 41 * "current" is in register %ebx during any slow entries.
41 */ 42 */
@@ -46,7 +47,7 @@
46#include <asm/errno.h> 47#include <asm/errno.h>
47#include <asm/segment.h> 48#include <asm/segment.h>
48#include <asm/smp.h> 49#include <asm/smp.h>
49#include <asm/page.h> 50#include <asm/page_types.h>
50#include <asm/desc.h> 51#include <asm/desc.h>
51#include <asm/percpu.h> 52#include <asm/percpu.h>
52#include <asm/dwarf2.h> 53#include <asm/dwarf2.h>
@@ -101,121 +102,221 @@
101#define resume_userspace_sig resume_userspace 102#define resume_userspace_sig resume_userspace
102#endif 103#endif
103 104
104#define SAVE_ALL \ 105/*
105 cld; \ 106 * User gs save/restore
106 pushl %fs; \ 107 *
107 CFI_ADJUST_CFA_OFFSET 4;\ 108 * %gs is used for userland TLS and kernel only uses it for stack
108 /*CFI_REL_OFFSET fs, 0;*/\ 109 * canary which is required to be at %gs:20 by gcc. Read the comment
109 pushl %es; \ 110 * at the top of stackprotector.h for more info.
110 CFI_ADJUST_CFA_OFFSET 4;\ 111 *
111 /*CFI_REL_OFFSET es, 0;*/\ 112 * Local labels 98 and 99 are used.
112 pushl %ds; \ 113 */
113 CFI_ADJUST_CFA_OFFSET 4;\ 114#ifdef CONFIG_X86_32_LAZY_GS
114 /*CFI_REL_OFFSET ds, 0;*/\ 115
115 pushl %eax; \ 116 /* unfortunately push/pop can't be no-op */
116 CFI_ADJUST_CFA_OFFSET 4;\ 117.macro PUSH_GS
117 CFI_REL_OFFSET eax, 0;\ 118 pushl $0
118 pushl %ebp; \ 119 CFI_ADJUST_CFA_OFFSET 4
119 CFI_ADJUST_CFA_OFFSET 4;\ 120.endm
120 CFI_REL_OFFSET ebp, 0;\ 121.macro POP_GS pop=0
121 pushl %edi; \ 122 addl $(4 + \pop), %esp
122 CFI_ADJUST_CFA_OFFSET 4;\ 123 CFI_ADJUST_CFA_OFFSET -(4 + \pop)
123 CFI_REL_OFFSET edi, 0;\ 124.endm
124 pushl %esi; \ 125.macro POP_GS_EX
125 CFI_ADJUST_CFA_OFFSET 4;\ 126.endm
126 CFI_REL_OFFSET esi, 0;\ 127
127 pushl %edx; \ 128 /* all the rest are no-op */
128 CFI_ADJUST_CFA_OFFSET 4;\ 129.macro PTGS_TO_GS
129 CFI_REL_OFFSET edx, 0;\ 130.endm
130 pushl %ecx; \ 131.macro PTGS_TO_GS_EX
131 CFI_ADJUST_CFA_OFFSET 4;\ 132.endm
132 CFI_REL_OFFSET ecx, 0;\ 133.macro GS_TO_REG reg
133 pushl %ebx; \ 134.endm
134 CFI_ADJUST_CFA_OFFSET 4;\ 135.macro REG_TO_PTGS reg
135 CFI_REL_OFFSET ebx, 0;\ 136.endm
136 movl $(__USER_DS), %edx; \ 137.macro SET_KERNEL_GS reg
137 movl %edx, %ds; \ 138.endm
138 movl %edx, %es; \ 139
139 movl $(__KERNEL_PERCPU), %edx; \ 140#else /* CONFIG_X86_32_LAZY_GS */
141
142.macro PUSH_GS
143 pushl %gs
144 CFI_ADJUST_CFA_OFFSET 4
145 /*CFI_REL_OFFSET gs, 0*/
146.endm
147
148.macro POP_GS pop=0
14998: popl %gs
150 CFI_ADJUST_CFA_OFFSET -4
151 /*CFI_RESTORE gs*/
152 .if \pop <> 0
153 add $\pop, %esp
154 CFI_ADJUST_CFA_OFFSET -\pop
155 .endif
156.endm
157.macro POP_GS_EX
158.pushsection .fixup, "ax"
15999: movl $0, (%esp)
160 jmp 98b
161.section __ex_table, "a"
162 .align 4
163 .long 98b, 99b
164.popsection
165.endm
166
167.macro PTGS_TO_GS
16898: mov PT_GS(%esp), %gs
169.endm
170.macro PTGS_TO_GS_EX
171.pushsection .fixup, "ax"
17299: movl $0, PT_GS(%esp)
173 jmp 98b
174.section __ex_table, "a"
175 .align 4
176 .long 98b, 99b
177.popsection
178.endm
179
180.macro GS_TO_REG reg
181 movl %gs, \reg
182 /*CFI_REGISTER gs, \reg*/
183.endm
184.macro REG_TO_PTGS reg
185 movl \reg, PT_GS(%esp)
186 /*CFI_REL_OFFSET gs, PT_GS*/
187.endm
188.macro SET_KERNEL_GS reg
189 movl $(__KERNEL_STACK_CANARY), \reg
190 movl \reg, %gs
191.endm
192
193#endif /* CONFIG_X86_32_LAZY_GS */
194
195.macro SAVE_ALL
196 cld
197 PUSH_GS
198 pushl %fs
199 CFI_ADJUST_CFA_OFFSET 4
200 /*CFI_REL_OFFSET fs, 0;*/
201 pushl %es
202 CFI_ADJUST_CFA_OFFSET 4
203 /*CFI_REL_OFFSET es, 0;*/
204 pushl %ds
205 CFI_ADJUST_CFA_OFFSET 4
206 /*CFI_REL_OFFSET ds, 0;*/
207 pushl %eax
208 CFI_ADJUST_CFA_OFFSET 4
209 CFI_REL_OFFSET eax, 0
210 pushl %ebp
211 CFI_ADJUST_CFA_OFFSET 4
212 CFI_REL_OFFSET ebp, 0
213 pushl %edi
214 CFI_ADJUST_CFA_OFFSET 4
215 CFI_REL_OFFSET edi, 0
216 pushl %esi
217 CFI_ADJUST_CFA_OFFSET 4
218 CFI_REL_OFFSET esi, 0
219 pushl %edx
220 CFI_ADJUST_CFA_OFFSET 4
221 CFI_REL_OFFSET edx, 0
222 pushl %ecx
223 CFI_ADJUST_CFA_OFFSET 4
224 CFI_REL_OFFSET ecx, 0
225 pushl %ebx
226 CFI_ADJUST_CFA_OFFSET 4
227 CFI_REL_OFFSET ebx, 0
228 movl $(__USER_DS), %edx
229 movl %edx, %ds
230 movl %edx, %es
231 movl $(__KERNEL_PERCPU), %edx
140 movl %edx, %fs 232 movl %edx, %fs
233 SET_KERNEL_GS %edx
234.endm
141 235
142#define RESTORE_INT_REGS \ 236.macro RESTORE_INT_REGS
143 popl %ebx; \ 237 popl %ebx
144 CFI_ADJUST_CFA_OFFSET -4;\ 238 CFI_ADJUST_CFA_OFFSET -4
145 CFI_RESTORE ebx;\ 239 CFI_RESTORE ebx
146 popl %ecx; \ 240 popl %ecx
147 CFI_ADJUST_CFA_OFFSET -4;\ 241 CFI_ADJUST_CFA_OFFSET -4
148 CFI_RESTORE ecx;\ 242 CFI_RESTORE ecx
149 popl %edx; \ 243 popl %edx
150 CFI_ADJUST_CFA_OFFSET -4;\ 244 CFI_ADJUST_CFA_OFFSET -4
151 CFI_RESTORE edx;\ 245 CFI_RESTORE edx
152 popl %esi; \ 246 popl %esi
153 CFI_ADJUST_CFA_OFFSET -4;\ 247 CFI_ADJUST_CFA_OFFSET -4
154 CFI_RESTORE esi;\ 248 CFI_RESTORE esi
155 popl %edi; \ 249 popl %edi
156 CFI_ADJUST_CFA_OFFSET -4;\ 250 CFI_ADJUST_CFA_OFFSET -4
157 CFI_RESTORE edi;\ 251 CFI_RESTORE edi
158 popl %ebp; \ 252 popl %ebp
159 CFI_ADJUST_CFA_OFFSET -4;\ 253 CFI_ADJUST_CFA_OFFSET -4
160 CFI_RESTORE ebp;\ 254 CFI_RESTORE ebp
161 popl %eax; \ 255 popl %eax
162 CFI_ADJUST_CFA_OFFSET -4;\ 256 CFI_ADJUST_CFA_OFFSET -4
163 CFI_RESTORE eax 257 CFI_RESTORE eax
258.endm
164 259
165#define RESTORE_REGS \ 260.macro RESTORE_REGS pop=0
166 RESTORE_INT_REGS; \ 261 RESTORE_INT_REGS
1671: popl %ds; \ 2621: popl %ds
168 CFI_ADJUST_CFA_OFFSET -4;\ 263 CFI_ADJUST_CFA_OFFSET -4
169 /*CFI_RESTORE ds;*/\ 264 /*CFI_RESTORE ds;*/
1702: popl %es; \ 2652: popl %es
171 CFI_ADJUST_CFA_OFFSET -4;\ 266 CFI_ADJUST_CFA_OFFSET -4
172 /*CFI_RESTORE es;*/\ 267 /*CFI_RESTORE es;*/
1733: popl %fs; \ 2683: popl %fs
174 CFI_ADJUST_CFA_OFFSET -4;\ 269 CFI_ADJUST_CFA_OFFSET -4
175 /*CFI_RESTORE fs;*/\ 270 /*CFI_RESTORE fs;*/
176.pushsection .fixup,"ax"; \ 271 POP_GS \pop
1774: movl $0,(%esp); \ 272.pushsection .fixup, "ax"
178 jmp 1b; \ 2734: movl $0, (%esp)
1795: movl $0,(%esp); \ 274 jmp 1b
180 jmp 2b; \ 2755: movl $0, (%esp)
1816: movl $0,(%esp); \ 276 jmp 2b
182 jmp 3b; \ 2776: movl $0, (%esp)
183.section __ex_table,"a";\ 278 jmp 3b
184 .align 4; \ 279.section __ex_table, "a"
185 .long 1b,4b; \ 280 .align 4
186 .long 2b,5b; \ 281 .long 1b, 4b
187 .long 3b,6b; \ 282 .long 2b, 5b
283 .long 3b, 6b
188.popsection 284.popsection
285 POP_GS_EX
286.endm
189 287
190#define RING0_INT_FRAME \ 288.macro RING0_INT_FRAME
191 CFI_STARTPROC simple;\ 289 CFI_STARTPROC simple
192 CFI_SIGNAL_FRAME;\ 290 CFI_SIGNAL_FRAME
193 CFI_DEF_CFA esp, 3*4;\ 291 CFI_DEF_CFA esp, 3*4
194 /*CFI_OFFSET cs, -2*4;*/\ 292 /*CFI_OFFSET cs, -2*4;*/
195 CFI_OFFSET eip, -3*4 293 CFI_OFFSET eip, -3*4
294.endm
196 295
197#define RING0_EC_FRAME \ 296.macro RING0_EC_FRAME
198 CFI_STARTPROC simple;\ 297 CFI_STARTPROC simple
199 CFI_SIGNAL_FRAME;\ 298 CFI_SIGNAL_FRAME
200 CFI_DEF_CFA esp, 4*4;\ 299 CFI_DEF_CFA esp, 4*4
201 /*CFI_OFFSET cs, -2*4;*/\ 300 /*CFI_OFFSET cs, -2*4;*/
202 CFI_OFFSET eip, -3*4 301 CFI_OFFSET eip, -3*4
302.endm
203 303
204#define RING0_PTREGS_FRAME \ 304.macro RING0_PTREGS_FRAME
205 CFI_STARTPROC simple;\ 305 CFI_STARTPROC simple
206 CFI_SIGNAL_FRAME;\ 306 CFI_SIGNAL_FRAME
207 CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\ 307 CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
208 /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\ 308 /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
209 CFI_OFFSET eip, PT_EIP-PT_OLDESP;\ 309 CFI_OFFSET eip, PT_EIP-PT_OLDESP
210 /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\ 310 /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
211 /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\ 311 /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
212 CFI_OFFSET eax, PT_EAX-PT_OLDESP;\ 312 CFI_OFFSET eax, PT_EAX-PT_OLDESP
213 CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\ 313 CFI_OFFSET ebp, PT_EBP-PT_OLDESP
214 CFI_OFFSET edi, PT_EDI-PT_OLDESP;\ 314 CFI_OFFSET edi, PT_EDI-PT_OLDESP
215 CFI_OFFSET esi, PT_ESI-PT_OLDESP;\ 315 CFI_OFFSET esi, PT_ESI-PT_OLDESP
216 CFI_OFFSET edx, PT_EDX-PT_OLDESP;\ 316 CFI_OFFSET edx, PT_EDX-PT_OLDESP
217 CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\ 317 CFI_OFFSET ecx, PT_ECX-PT_OLDESP
218 CFI_OFFSET ebx, PT_EBX-PT_OLDESP 318 CFI_OFFSET ebx, PT_EBX-PT_OLDESP
319.endm
219 320
220ENTRY(ret_from_fork) 321ENTRY(ret_from_fork)
221 CFI_STARTPROC 322 CFI_STARTPROC
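The comment block introduced above pins down the constraint driving this macro machinery: gcc's 32-bit stack protector reads the canary from the fixed address %gs:20, so SET_KERNEL_GS must install a segment whose base puts a live canary exactly 20 bytes in. A minimal C sketch of that contract; the struct layout is illustrative, not the kernel's:

	/* illustrative only: gcc hard-codes the %gs:20 slot on i386 */
	struct canary_segment {
		char		pad[20];	/* offsets 0..19: anything */
		unsigned long	canary;		/* offset 20: what %gs:20 must hit */
	};

	static inline unsigned long read_gcc_canary(void)
	{
		unsigned long val;

		/* the load gcc emits in every protected prologue (32-bit) */
		asm("movl %%gs:20, %0" : "=r" (val));
		return val;
	}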
@@ -341,8 +442,7 @@ sysenter_past_esp:
 
 	GET_THREAD_INFO(%ebp)
 
-	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
 	jnz sysenter_audit
 sysenter_do_call:
 	cmpl $(nr_syscalls), %eax
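An aside on why the testw instructions in this file keep becoming testl: a byte-wide testb inspects only bits 0..7, so a flag at bit 8 (the removed comment names _TIF_SECCOMP) escapes it entirely, and 16-bit operations on partial registers can stall on some CPUs, so testing the full 32-bit word is both safe and cheap. A small illustration, with flag positions assumed for the example:

	/* illustration; flag positions assumed */
	#define _TIF_SYSCALL_TRACE	(1 << 0)
	#define _TIF_SECCOMP		(1 << 8)	/* outside the low byte */

	int needs_slow_path(unsigned int ti_flags)
	{
		/* a byte-wide test would miss _TIF_SECCOMP;
		   checking the whole word catches every work flag */
		return (ti_flags & (_TIF_SYSCALL_TRACE | _TIF_SECCOMP)) != 0;
	}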
@@ -353,7 +453,7 @@ sysenter_do_call:
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
-	testw $_TIF_ALLWORK_MASK, %cx
+	testl $_TIF_ALLWORK_MASK, %ecx
 	jne sysexit_audit
 sysenter_exit:
 /* if something modifies registers it must also disable sysexit */
@@ -362,11 +462,12 @@ sysenter_exit:
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
 1:	mov  PT_FS(%esp), %fs
+	PTGS_TO_GS
 	ENABLE_INTERRUPTS_SYSEXIT
 
 #ifdef CONFIG_AUDITSYSCALL
 sysenter_audit:
-	testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
 	jnz syscall_trace_entry
 	addl $4,%esp
 	CFI_ADJUST_CFA_OFFSET -4
@@ -383,7 +484,7 @@ sysenter_audit:
 	jmp sysenter_do_call
 
 sysexit_audit:
-	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
+	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
 	jne syscall_exit_work
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_ANY)
@@ -396,7 +497,7 @@ sysexit_audit:
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
-	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
+	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
 	jne syscall_exit_work
 	movl PT_EAX(%esp),%eax	/* reload syscall return value */
 	jmp sysenter_exit
@@ -410,6 +511,7 @@ sysexit_audit:
 	.align 4
 	.long 1b,2b
 .popsection
+	PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)
 
 	# system call handler stub
@@ -420,8 +522,7 @@ ENTRY(system_call)
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 					# system call tracing in operation / emulation
-	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
 	jnz syscall_trace_entry
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
@@ -435,7 +536,7 @@ syscall_exit:
 					# between sampling and the iret
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
-	testw $_TIF_ALLWORK_MASK, %cx	# current->work
+	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
 	jne syscall_exit_work
 
 restore_all:
@@ -452,8 +553,7 @@ restore_all:
 restore_nocheck:
 	TRACE_IRQS_IRET
 restore_nocheck_notrace:
-	RESTORE_REGS
-	addl $4, %esp			# skip orig_eax/error_code
+	RESTORE_REGS 4			# skip orig_eax/error_code
 	CFI_ADJUST_CFA_OFFSET -4
 irq_return:
 	INTERRUPT_RETURN
@@ -571,7 +671,7 @@ END(syscall_trace_entry)
 	# perform syscall exit tracing
 	ALIGN
 syscall_exit_work:
-	testb $_TIF_WORK_SYSCALL_EXIT, %cl
+	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
 	jz work_pending
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
@@ -595,28 +695,50 @@ syscall_badsys:
 END(syscall_badsys)
 	CFI_ENDPROC
 
-#define FIXUP_ESPFIX_STACK \
-	/* since we are on a wrong stack, we cant make it a C code :( */ \
-	PER_CPU(gdt_page, %ebx); \
-	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
-	addl %esp, %eax; \
-	pushl $__KERNEL_DS; \
-	CFI_ADJUST_CFA_OFFSET 4; \
-	pushl %eax; \
-	CFI_ADJUST_CFA_OFFSET 4; \
-	lss (%esp), %esp; \
-	CFI_ADJUST_CFA_OFFSET -8;
-#define UNWIND_ESPFIX_STACK \
-	movl %ss, %eax; \
-	/* see if on espfix stack */ \
-	cmpw $__ESPFIX_SS, %ax; \
-	jne 27f; \
-	movl $__KERNEL_DS, %eax; \
-	movl %eax, %ds; \
-	movl %eax, %es; \
-	/* switch to normal stack */ \
-	FIXUP_ESPFIX_STACK; \
-27:;
+/*
+ * System calls that need a pt_regs pointer.
+ */
+#define PTREGSCALL(name) \
+	ALIGN; \
+ptregs_##name: \
+	leal 4(%esp),%eax; \
+	jmp sys_##name;
+
+PTREGSCALL(iopl)
+PTREGSCALL(fork)
+PTREGSCALL(clone)
+PTREGSCALL(vfork)
+PTREGSCALL(execve)
+PTREGSCALL(sigaltstack)
+PTREGSCALL(sigreturn)
+PTREGSCALL(rt_sigreturn)
+PTREGSCALL(vm86)
+PTREGSCALL(vm86old)
+
+.macro FIXUP_ESPFIX_STACK
+	/* since we are on a wrong stack, we cant make it a C code :( */
+	PER_CPU(gdt_page, %ebx)
+	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
+	addl %esp, %eax
+	pushl $__KERNEL_DS
+	CFI_ADJUST_CFA_OFFSET 4
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	lss (%esp), %esp
+	CFI_ADJUST_CFA_OFFSET -8
+.endm
+.macro UNWIND_ESPFIX_STACK
+	movl %ss, %eax
+	/* see if on espfix stack */
+	cmpw $__ESPFIX_SS, %ax
+	jne 27f
+	movl $__KERNEL_DS, %eax
+	movl %eax, %ds
+	movl %eax, %es
+	/* switch to normal stack */
+	FIXUP_ESPFIX_STACK
+27:
+.endm
 
 /*
  * Build the entry stubs and pointer table with some assembler magic.
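The PTREGSCALL stubs above load the address of the saved register frame (the slot just past the return address, hence leal 4(%esp)) into %eax and jump to the handler; with the 32-bit kernel built with -mregparm=3, %eax carries the first C argument. A hedged sketch of the receiving side; the do_fork() signature here is my recollection of this kernel era, not something this diff shows:

	/* sketch: the C handler now receives the frame directly */
	long sys_fork(struct pt_regs *regs)
	{
		/* assumed do_fork() signature for this era */
		return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
	}

The ioport.c hunk further down shows the same conversion applied to sys_iopl, replacing the old cast from a raw stack address.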
@@ -672,7 +794,7 @@ common_interrupt:
 ENDPROC(common_interrupt)
 	CFI_ENDPROC
 
-#define BUILD_INTERRUPT(name, nr)	\
+#define BUILD_INTERRUPT3(name, nr, fn)	\
 ENTRY(name)				\
 	RING0_INT_FRAME;		\
 	pushl $~(nr);			\
@@ -680,13 +802,15 @@ ENTRY(name) \
 	SAVE_ALL;			\
 	TRACE_IRQS_OFF			\
 	movl %esp,%eax;			\
-	call smp_##name;		\
+	call fn;			\
 	jmp ret_from_intr;		\
 	CFI_ENDPROC;			\
 ENDPROC(name)
 
+#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
+
 /* The include is where all of the SMP etc. interrupts come from */
-#include "entry_arch.h"
+#include <asm/entry_arch.h>
 
 ENTRY(coprocessor_error)
 	RING0_INT_FRAME
@@ -1068,7 +1192,10 @@ ENTRY(page_fault)
 	CFI_ADJUST_CFA_OFFSET 4
 	ALIGN
 error_code:
-	/* the function address is in %fs's slot on the stack */
+	/* the function address is in %gs's slot on the stack */
+	pushl %fs
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET fs, 0*/
 	pushl %es
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET es, 0*/
@@ -1097,20 +1224,15 @@ error_code:
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET ebx, 0
 	cld
-	pushl %fs
-	CFI_ADJUST_CFA_OFFSET 4
-	/*CFI_REL_OFFSET fs, 0*/
 	movl $(__KERNEL_PERCPU), %ecx
 	movl %ecx, %fs
 	UNWIND_ESPFIX_STACK
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	/*CFI_REGISTER es, ecx*/
-	movl PT_FS(%esp), %edi		# get the function address
+	GS_TO_REG %ecx
+	movl PT_GS(%esp), %edi		# get the function address
 	movl PT_ORIG_EAX(%esp), %edx	# get the error code
 	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
-	mov  %ecx, PT_FS(%esp)
-	/*CFI_REL_OFFSET fs, ES*/
+	REG_TO_PTGS %ecx
+	SET_KERNEL_GS %ecx
 	movl $(__USER_DS), %ecx
 	movl %ecx, %ds
 	movl %ecx, %es
@@ -1134,26 +1256,27 @@ END(page_fault)
  * by hand onto the new stack - while updating the return eip past
  * the instruction that would have done it for sysenter.
  */
-#define FIX_STACK(offset, ok, label)		\
-	cmpw $__KERNEL_CS,4(%esp);		\
-	jne ok;					\
-label:						\
-	movl TSS_sysenter_sp0+offset(%esp),%esp;	\
-	CFI_DEF_CFA esp, 0;			\
-	CFI_UNDEFINED eip;			\
-	pushfl;					\
-	CFI_ADJUST_CFA_OFFSET 4;		\
-	pushl $__KERNEL_CS;			\
-	CFI_ADJUST_CFA_OFFSET 4;		\
-	pushl $sysenter_past_esp;		\
-	CFI_ADJUST_CFA_OFFSET 4;		\
+.macro FIX_STACK offset ok label
+	cmpw $__KERNEL_CS, 4(%esp)
+	jne \ok
+\label:
+	movl TSS_sysenter_sp0 + \offset(%esp), %esp
+	CFI_DEF_CFA esp, 0
+	CFI_UNDEFINED eip
+	pushfl
+	CFI_ADJUST_CFA_OFFSET 4
+	pushl $__KERNEL_CS
+	CFI_ADJUST_CFA_OFFSET 4
+	pushl $sysenter_past_esp
+	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET eip, 0
+.endm
 
 ENTRY(debug)
 	RING0_INT_FRAME
 	cmpl $ia32_sysenter_target,(%esp)
 	jne debug_stack_correct
-	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
+	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
 debug_stack_correct:
 	pushl $-1			# mark this as an int
 	CFI_ADJUST_CFA_OFFSET 4
@@ -1211,7 +1334,7 @@ nmi_stack_correct:
 
 nmi_stack_fixup:
 	RING0_INT_FRAME
-	FIX_STACK(12,nmi_stack_correct, 1)
+	FIX_STACK 12, nmi_stack_correct, 1
 	jmp nmi_stack_correct
 
 nmi_debug_stack_check:
@@ -1222,7 +1345,7 @@ nmi_debug_stack_check:
 	jb nmi_stack_correct
 	cmpl $debug_esp_fix_insn,(%esp)
 	ja nmi_stack_correct
-	FIX_STACK(24,nmi_stack_correct, 1)
+	FIX_STACK 24, nmi_stack_correct, 1
 	jmp nmi_stack_correct
 
 nmi_espfix_stack:
@@ -1234,7 +1357,7 @@ nmi_espfix_stack:
 	CFI_ADJUST_CFA_OFFSET 4
 	pushl %esp
 	CFI_ADJUST_CFA_OFFSET 4
-	addw $4, (%esp)
+	addl $4, (%esp)
 	/* copy the iret frame of 12 bytes */
 	.rept 3
 	pushl 16(%esp)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a1346217e43..a331ec38af9 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -48,10 +48,11 @@
 #include <asm/unistd.h>
 #include <asm/thread_info.h>
 #include <asm/hw_irq.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/irqflags.h>
 #include <asm/paravirt.h>
 #include <asm/ftrace.h>
+#include <asm/percpu.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -76,20 +77,17 @@ ENTRY(ftrace_caller)
 	movq 8(%rbp), %rsi
 	subq $MCOUNT_INSN_SIZE, %rdi
 
-.globl ftrace_call
-ftrace_call:
+GLOBAL(ftrace_call)
 	call ftrace_stub
 
 	MCOUNT_RESTORE_FRAME
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
+GLOBAL(ftrace_graph_call)
 	jmp ftrace_stub
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
 	retq
 END(ftrace_caller)
 
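GLOBAL() collapses the recurring .globl/label pair into one annotation. My assumption of its definition, inferred from how it is used in these hunks (the real macro belongs in arch/x86/include/asm/linkage.h):

	/* assumed definition; verify against asm/linkage.h */
	#define GLOBAL(name)	\
		.globl name;	\
	name: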
@@ -109,8 +107,7 @@ ENTRY(mcount)
 	jnz ftrace_graph_caller
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
 	retq
 
 trace:
@@ -147,9 +144,7 @@ ENTRY(ftrace_graph_caller)
 	retq
 END(ftrace_graph_caller)
 
-
-.globl return_to_handler
-return_to_handler:
+GLOBAL(return_to_handler)
 	subq  $80, %rsp
 
 	movq %rax, (%rsp)
@@ -187,6 +182,7 @@ return_to_handler:
 ENTRY(native_usergs_sysret64)
 	swapgs
 	sysretq
+ENDPROC(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 
@@ -209,7 +205,7 @@ ENTRY(native_usergs_sysret64)
 
 	/* %rsp:at FRAMEEND */
 	.macro FIXUP_TOP_OF_STACK tmp offset=0
-	movq %gs:pda_oldrsp,\tmp
+	movq PER_CPU_VAR(old_rsp),\tmp
 	movq \tmp,RSP+\offset(%rsp)
 	movq $__USER_DS,SS+\offset(%rsp)
 	movq $__USER_CS,CS+\offset(%rsp)
@@ -220,7 +216,7 @@ ENTRY(native_usergs_sysret64)
 
 	.macro RESTORE_TOP_OF_STACK tmp offset=0
 	movq RSP+\offset(%rsp),\tmp
-	movq \tmp,%gs:pda_oldrsp
+	movq \tmp,PER_CPU_VAR(old_rsp)
 	movq EFLAGS+\offset(%rsp),\tmp
 	movq \tmp,R11+\offset(%rsp)
 	.endm
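The substitution running through this file, %gs:pda_foo becoming PER_CPU_VAR(foo), replaces fields of the old x86-64 PDA with ordinary per-cpu variables. A simplified sketch of the accessor these hunks assume (asm/percpu.h abstracts the segment register; this is the SMP x86-64 flavor):

	/* simplified sketch of the assumed accessor */
	#ifdef CONFIG_SMP
	#define PER_CPU_VAR(var)	%gs:per_cpu__##var
	#else
	#define PER_CPU_VAR(var)	per_cpu__##var
	#endif

So incl PER_CPU_VAR(irq_count) assembles to the same kind of %gs-relative access the pda fields used, but against a symbol the generic per-cpu machinery allocates and relocates.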
@@ -336,15 +332,15 @@ ENTRY(save_args)
 	je 1f
 	SWAPGS
 	/*
-	 * irqcount is used to check if a CPU is already on an interrupt stack
+	 * irq_count is used to check if a CPU is already on an interrupt stack
 	 * or not. While this is essentially redundant with preempt_count it is
 	 * a little cheaper to use a separate counter in the PDA (short of
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
-1:	incl %gs:pda_irqcount
+1:	incl PER_CPU_VAR(irq_count)
 	jne 2f
 	popq_cfi %rax			/* move return address... */
-	mov %gs:pda_irqstackptr,%rsp
+	mov PER_CPU_VAR(irq_stack_ptr),%rsp
 	EMPTY_FRAME 0
 	pushq_cfi %rbp			/* backlink for unwinder */
 	pushq_cfi %rax			/* ... to the new stack */
@@ -372,6 +368,7 @@ ENTRY(save_rest)
 END(save_rest)
 
 /* save complete stack frame */
+	.pushsection .kprobes.text, "ax"
 ENTRY(save_paranoid)
 	XCPT_FRAME 1 RDI+8
 	cld
@@ -400,6 +397,7 @@ ENTRY(save_paranoid)
 1:	ret
 	CFI_ENDPROC
 END(save_paranoid)
+	.popsection
 
 /*
  * A newly forked process directly context switches into this address.
@@ -409,6 +407,8 @@ END(save_paranoid)
 ENTRY(ret_from_fork)
 	DEFAULT_FRAME
 
+	LOCK ; btr $TIF_FORK,TI_flags(%r8)
+
 	push kernel_eflags(%rip)
 	CFI_ADJUST_CFA_OFFSET 8
 	popf					# reset kernel eflags
@@ -418,7 +418,6 @@ ENTRY(ret_from_fork)
 
 	GET_THREAD_INFO(%rcx)
 
-	CFI_REMEMBER_STATE
 	RESTORE_REST
 
 	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
@@ -430,7 +429,6 @@ ENTRY(ret_from_fork)
 	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
 	jmp ret_from_sys_call			# go to the SYSRET fastpath
 
-	CFI_RESTORE_STATE
 	CFI_ENDPROC
 END(ret_from_fork)
 
@@ -468,7 +466,7 @@ END(ret_from_fork)
 ENTRY(system_call)
 	CFI_STARTPROC	simple
 	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
+	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
 	CFI_REGISTER	rip,rcx
 	/*CFI_REGISTER	rflags,r11*/
 	SWAPGS_UNSAFE_STACK
@@ -479,8 +477,8 @@ ENTRY(system_call)
 	 */
 ENTRY(system_call_after_swapgs)
 
-	movq	%rsp,%gs:pda_oldrsp
-	movq	%gs:pda_kernelstack,%rsp
+	movq	%rsp,PER_CPU_VAR(old_rsp)
+	movq	PER_CPU_VAR(kernel_stack),%rsp
 	/*
 	 * No need to follow this irqs off/on section - it's straight
 	 * and short:
@@ -523,7 +521,7 @@ sysret_check:
 	CFI_REGISTER	rip,rcx
 	RESTORE_ARGS 0,-ARG_SKIP,1
 	/*CFI_REGISTER	rflags,r11*/
-	movq	%gs:pda_oldrsp, %rsp
+	movq	PER_CPU_VAR(old_rsp), %rsp
 	USERGS_SYSRET64
 
 	CFI_RESTORE_STATE
@@ -630,16 +628,14 @@ tracesys:
  * Syscall return path ending with IRET.
  * Has correct top of stack, but partial stack frame.
  */
-	.globl int_ret_from_sys_call
-	.globl int_with_check
-int_ret_from_sys_call:
+GLOBAL(int_ret_from_sys_call)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl $3,CS-ARGOFFSET(%rsp)
 	je retint_restore_args
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi:	mask to check */
-int_with_check:
+GLOBAL(int_with_check)
 	LOCKDEP_SYS_EXIT_IRQ
 	GET_THREAD_INFO(%rcx)
 	movl TI_flags(%rcx),%edx
@@ -833,11 +829,11 @@ common_interrupt:
 	XCPT_FRAME
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
-	/* 0(%rsp): oldrsp-ARGOFFSET */
+	/* 0(%rsp): old_rsp-ARGOFFSET */
 ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
@@ -982,10 +978,14 @@ apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
 	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
 #endif
 
+#ifdef CONFIG_X86_UV
 apicinterrupt UV_BAU_MESSAGE \
 	uv_bau_message_intr1 uv_bau_message_interrupt
+#endif
 apicinterrupt LOCAL_TIMER_VECTOR \
 	apic_timer_interrupt smp_apic_timer_interrupt
+apicinterrupt GENERIC_INTERRUPT_VECTOR \
+	generic_interrupt smp_generic_interrupt
 
 #ifdef CONFIG_SMP
 apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
@@ -1073,10 +1073,10 @@ ENTRY(\sym)
 	TRACE_IRQS_OFF
 	movq %rsp,%rdi		/* pt_regs pointer */
 	xorl %esi,%esi		/* no error code */
-	movq %gs:pda_data_offset, %rbp
-	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	PER_CPU(init_tss, %rbp)
+	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
 	call \do_sym
-	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
 	jmp paranoid_exit	/* %ebx: no swapgs flag */
 	CFI_ENDPROC
 END(\sym)
@@ -1138,7 +1138,7 @@ ENTRY(native_load_gs_index)
 	CFI_STARTPROC
 	pushf
 	CFI_ADJUST_CFA_OFFSET 8
-	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
+	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 	SWAPGS
 gs_change:
 	movl %edi,%gs
@@ -1260,14 +1260,14 @@ ENTRY(call_softirq)
 	CFI_REL_OFFSET rbp,0
 	mov  %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	incl %gs:pda_irqcount
-	cmove %gs:pda_irqstackptr,%rsp
+	incl PER_CPU_VAR(irq_count)
+	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
 	push  %rbp			# backlink for old unwinder
 	call __do_softirq
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET   -8
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	ret
 	CFI_ENDPROC
 END(call_softirq)
@@ -1297,15 +1297,15 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
 	movq %rdi, %rsp            # we don't return, adjust the stack frame
 	CFI_ENDPROC
 	DEFAULT_FRAME
-11:	incl %gs:pda_irqcount
+11:	incl PER_CPU_VAR(irq_count)
 	movq %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	cmovzq %gs:pda_irqstackptr,%rsp
+	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
 	pushq %rbp			# backlink for old unwinder
 	call xen_evtchn_do_upcall
 	popq %rsp
 	CFI_DEF_CFA_REGISTER rsp
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	jmp  error_exit
 	CFI_ENDPROC
 END(do_hypervisor_callback)
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
deleted file mode 100644
index 53699c931ad..00000000000
--- a/arch/x86/kernel/es7000_32.c
+++ /dev/null
@@ -1,378 +0,0 @@
1/*
2 * Written by: Garry Forsgren, Unisys Corporation
3 * Natalie Protasevich, Unisys Corporation
4 * This file contains the code to configure and interface
5 * with Unisys ES7000 series hardware system manager.
6 *
7 * Copyright (c) 2003 Unisys Corporation. All Rights Reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it would be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
20 *
21 * Contact information: Unisys Corporation, Township Line & Union Meeting
22 * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or:
23 *
24 * http://www.unisys.com
25 */
26
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/kernel.h>
30#include <linux/smp.h>
31#include <linux/string.h>
32#include <linux/spinlock.h>
33#include <linux/errno.h>
34#include <linux/notifier.h>
35#include <linux/reboot.h>
36#include <linux/init.h>
37#include <linux/acpi.h>
38#include <asm/io.h>
39#include <asm/nmi.h>
40#include <asm/smp.h>
41#include <asm/atomic.h>
42#include <asm/apicdef.h>
43#include <mach_mpparse.h>
44#include <asm/genapic.h>
45#include <asm/setup.h>
46
47/*
48 * ES7000 chipsets
49 */
50
51#define NON_UNISYS 0
52#define ES7000_CLASSIC 1
53#define ES7000_ZORRO 2
54
55
56#define MIP_REG 1
57#define MIP_PSAI_REG 4
58
59#define MIP_BUSY 1
60#define MIP_SPIN 0xf0000
61#define MIP_VALID 0x0100000000000000ULL
62#define MIP_PORT(VALUE) ((VALUE >> 32) & 0xffff)
63
64#define MIP_RD_LO(VALUE) (VALUE & 0xffffffff)
65
66struct mip_reg_info {
67 unsigned long long mip_info;
68 unsigned long long delivery_info;
69 unsigned long long host_reg;
70 unsigned long long mip_reg;
71};
72
73struct part_info {
74 unsigned char type;
75 unsigned char length;
76 unsigned char part_id;
77 unsigned char apic_mode;
78 unsigned long snum;
79 char ptype[16];
80 char sname[64];
81 char pname[64];
82};
83
84struct psai {
85 unsigned long long entry_type;
86 unsigned long long addr;
87 unsigned long long bep_addr;
88};
89
90struct es7000_mem_info {
91 unsigned char type;
92 unsigned char length;
93 unsigned char resv[6];
94 unsigned long long start;
95 unsigned long long size;
96};
97
98struct es7000_oem_table {
99 unsigned long long hdr;
100 struct mip_reg_info mip;
101 struct part_info pif;
102 struct es7000_mem_info shm;
103 struct psai psai;
104};
105
106#ifdef CONFIG_ACPI
107
108struct oem_table {
109 struct acpi_table_header Header;
110 u32 OEMTableAddr;
111 u32 OEMTableSize;
112};
113
114extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
115extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
116#endif
117
118struct mip_reg {
119 unsigned long long off_0;
120 unsigned long long off_8;
121 unsigned long long off_10;
122 unsigned long long off_18;
123 unsigned long long off_20;
124 unsigned long long off_28;
125 unsigned long long off_30;
126 unsigned long long off_38;
127};
128
129#define MIP_SW_APIC 0x1020b
130#define MIP_FUNC(VALUE) (VALUE & 0xff)
131
132/*
133 * ES7000 Globals
134 */
135
136static volatile unsigned long *psai = NULL;
137static struct mip_reg *mip_reg;
138static struct mip_reg *host_reg;
139static int mip_port;
140static unsigned long mip_addr, host_addr;
141
142int es7000_plat;
143
144/*
145 * GSI override for ES7000 platforms.
146 */
147
148static unsigned int base;
149
150static int
151es7000_rename_gsi(int ioapic, int gsi)
152{
153 if (es7000_plat == ES7000_ZORRO)
154 return gsi;
155
156 if (!base) {
157 int i;
158 for (i = 0; i < nr_ioapics; i++)
159 base += nr_ioapic_registers[i];
160 }
161
162 if (!ioapic && (gsi < 16))
163 gsi += base;
164 return gsi;
165}
166
167static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
168{
169 unsigned long vect = 0, psaival = 0;
170
171 if (psai == NULL)
172 return -1;
173
174 vect = ((unsigned long)__pa(eip)/0x1000) << 16;
175 psaival = (0x1000000 | vect | cpu);
176
177 while (*psai & 0x1000000)
178 ;
179
180 *psai = psaival;
181
182 return 0;
183}
184
185static void noop_wait_for_deassert(atomic_t *deassert_not_used)
186{
187}
188
189static int __init es7000_update_genapic(void)
190{
191 genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
192
193 /* MPENTIUMIII */
194 if (boot_cpu_data.x86 == 6 &&
195 (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) {
196 es7000_update_genapic_to_cluster();
197 genapic->wait_for_init_deassert = noop_wait_for_deassert;
198 genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
199 }
200
201 return 0;
202}
203
204void __init
205setup_unisys(void)
206{
207 /*
208 * Determine the generation of the ES7000 currently running.
209 *
210 * es7000_plat = 1 if the machine is a 5xx ES7000 box
211 * es7000_plat = 2 if the machine is a x86_64 ES7000 box
212 *
213 */
214 if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2))
215 es7000_plat = ES7000_ZORRO;
216 else
217 es7000_plat = ES7000_CLASSIC;
218 ioapic_renumber_irq = es7000_rename_gsi;
219
220 x86_quirks->update_genapic = es7000_update_genapic;
221}
222
223/*
224 * Parse the OEM Table
225 */
226
227int __init
228parse_unisys_oem (char *oemptr)
229{
230 int i;
231 int success = 0;
232 unsigned char type, size;
233 unsigned long val;
234 char *tp = NULL;
235 struct psai *psaip = NULL;
236 struct mip_reg_info *mi;
237 struct mip_reg *host, *mip;
238
239 tp = oemptr;
240
241 tp += 8;
242
243 for (i=0; i <= 6; i++) {
244 type = *tp++;
245 size = *tp++;
246 tp -= 2;
247 switch (type) {
248 case MIP_REG:
249 mi = (struct mip_reg_info *)tp;
250 val = MIP_RD_LO(mi->host_reg);
251 host_addr = val;
252 host = (struct mip_reg *)val;
253 host_reg = __va(host);
254 val = MIP_RD_LO(mi->mip_reg);
255 mip_port = MIP_PORT(mi->mip_info);
256 mip_addr = val;
257 mip = (struct mip_reg *)val;
258 mip_reg = __va(mip);
259 pr_debug("es7000_mipcfg: host_reg = 0x%lx \n",
260 (unsigned long)host_reg);
261 pr_debug("es7000_mipcfg: mip_reg = 0x%lx \n",
262 (unsigned long)mip_reg);
263 success++;
264 break;
265 case MIP_PSAI_REG:
266 psaip = (struct psai *)tp;
267 if (tp != NULL) {
268 if (psaip->addr)
269 psai = __va(psaip->addr);
270 else
271 psai = NULL;
272 success++;
273 }
274 break;
275 default:
276 break;
277 }
278 tp += size;
279 }
280
281 if (success < 2) {
282 es7000_plat = NON_UNISYS;
283 } else
284 setup_unisys();
285 return es7000_plat;
286}
287
288#ifdef CONFIG_ACPI
289static unsigned long oem_addrX;
290static unsigned long oem_size;
291int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
292{
293 struct acpi_table_header *header = NULL;
294 int i = 0;
295
296 while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
297 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
298 struct oem_table *t = (struct oem_table *)header;
299
300 oem_addrX = t->OEMTableAddr;
301 oem_size = t->OEMTableSize;
302
303 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
304 oem_size);
305 return 0;
306 }
307 }
308 return -1;
309}
310
311void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
312{
313}
314#endif
315
316static void
317es7000_spin(int n)
318{
319 int i = 0;
320
321 while (i++ < n)
322 rep_nop();
323}
324
325static int __init
326es7000_mip_write(struct mip_reg *mip_reg)
327{
328 int status = 0;
329 int spin;
330
331 spin = MIP_SPIN;
332 while (((unsigned long long)host_reg->off_38 &
333 (unsigned long long)MIP_VALID) != 0) {
334 if (--spin <= 0) {
335 printk("es7000_mip_write: Timeout waiting for Host Valid Flag");
336 return -1;
337 }
338 es7000_spin(MIP_SPIN);
339 }
340
341 memcpy(host_reg, mip_reg, sizeof(struct mip_reg));
342 outb(1, mip_port);
343
344 spin = MIP_SPIN;
345
346 while (((unsigned long long)mip_reg->off_38 &
347 (unsigned long long)MIP_VALID) == 0) {
348 if (--spin <= 0) {
349 printk("es7000_mip_write: Timeout waiting for MIP Valid Flag");
350 return -1;
351 }
352 es7000_spin(MIP_SPIN);
353 }
354
355 status = ((unsigned long long)mip_reg->off_0 &
356 (unsigned long long)0xffff0000000000ULL) >> 48;
357 mip_reg->off_38 = ((unsigned long long)mip_reg->off_38 &
358 (unsigned long long)~MIP_VALID);
359 return status;
360}
361
362void __init
363es7000_sw_apic(void)
364{
365 if (es7000_plat) {
366 int mip_status;
367 struct mip_reg es7000_mip_reg;
368
369 printk("ES7000: Enabling APIC mode.\n");
370 memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
371 es7000_mip_reg.off_0 = MIP_SW_APIC;
372 es7000_mip_reg.off_38 = (MIP_VALID);
373 while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
374 printk("es7000_sw_apic: command failed, status = %x\n",
375 mip_status);
376 return;
377 }
378}
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index b9a4d8c4b93..f5b27224769 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -26,27 +26,6 @@
 #include <asm/bios_ebda.h>
 #include <asm/trampoline.h>
 
-/* boot cpu pda */
-static struct x8664_pda _boot_cpu_pda;
-
-#ifdef CONFIG_SMP
-/*
- * We install an empty cpu_pda pointer table to indicate to early users
- * (numa_set_node) that the cpu_pda pointer table for cpus other than
- * the boot cpu is not yet setup.
- */
-static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata;
-#else
-static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly;
-#endif
-
-void __init x86_64_init_pda(void)
-{
-	_cpu_pda = __cpu_pda;
-	cpu_pda(0) = &_boot_cpu_pda;
-	pda_init(0);
-}
-
 static void __init zap_identity_mappings(void)
 {
 	pgd_t *pgd = pgd_offset_k(0UL);
@@ -112,8 +91,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	if (console_loglevel == 10)
 		early_printk("Kernel alive\n");
 
-	x86_64_init_pda();
-
 	x86_64_start_reservations(real_mode_data);
 }
 
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index e835b4eea70..c32ca19d591 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -11,14 +11,15 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
+#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
 #include <asm/desc.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 #include <asm/setup.h>
 #include <asm/processor-flags.h>
+#include <asm/percpu.h>
 
 /* Physical address */
 #define pa(X) ((X) - __PAGE_OFFSET)
@@ -429,14 +430,34 @@ is386:	movl $2,%ecx		# set MP
 	ljmp $(__KERNEL_CS),$1f
 1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
 	movl %eax,%ss			# after changing gdt.
-	movl %eax,%fs			# gets reset once there's real percpu
 
 	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
 	movl %eax,%ds
 	movl %eax,%es
 
-	xorl %eax,%eax			# Clear GS and LDT
+	movl $(__KERNEL_PERCPU), %eax
+	movl %eax,%fs			# set this cpu's percpu
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+	/*
+	 * The linker can't handle this by relocation.  Manually set
+	 * base address in stack canary segment descriptor.
+	 */
+	cmpb $0,ready
+	jne 1f
+	movl $per_cpu__gdt_page,%eax
+	movl $per_cpu__stack_canary,%ecx
+	subl $20, %ecx
+	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+	shrl $16, %ecx
+	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
+1:
+#endif
+	movl $(__KERNEL_STACK_CANARY),%eax
 	movl %eax,%gs
+
+	xorl %eax,%eax			# Clear LDT
 	lldt %ax
 
 	cld			# gcc2 wants the direction flag cleared at all times
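The movw/movb sequence above scatters a 32-bit base through the oddly packed x86 segment descriptor; the subl $20 shifts the base down so that %gs:20 lands exactly on per_cpu__stack_canary, matching the %gs:20 contract noted in entry_32.S. The same encoding in C for reference; the byte offsets are architectural, the helper name is made up:

	/* illustrative helper; byte offsets follow the x86 GDT layout */
	static void set_desc_base(unsigned char desc[8], unsigned int base)
	{
		desc[2] = base & 0xff;		/* base bits  7:0  */
		desc[3] = (base >> 8) & 0xff;	/* base bits 15:8  */
		desc[4] = (base >> 16) & 0xff;	/* base bits 23:16 */
		desc[7] = (base >> 24) & 0xff;	/* base bits 31:24 */
	}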
@@ -446,8 +467,6 @@ is386:	movl $2,%ecx		# set MP
 	movb $1, ready
 	cmpb $0,%cl		# the first CPU calls start_kernel
 	je   1f
-	movl $(__KERNEL_PERCPU), %eax
-	movl %eax,%fs		# set this cpu's percpu
 	movl (stack_start), %esp
 1:
 #endif /* CONFIG_SMP */
@@ -548,12 +567,8 @@ early_fault:
 	pushl %eax
 	pushl %edx		/* trapno */
 	pushl $fault_msg
-#ifdef CONFIG_EARLY_PRINTK
-	call early_printk
-#else
 	call printk
 #endif
-#endif
 	call dump_stack
 hlt_loop:
 	hlt
@@ -580,11 +595,10 @@ ignore_int:
 	pushl 32(%esp)
 	pushl 40(%esp)
 	pushl $int_msg
-#ifdef CONFIG_EARLY_PRINTK
-	call early_printk
-#else
 	call printk
-#endif
+
+	call dump_stack
+
 	addl $(5*4),%esp
 	popl %ds
 	popl %es
@@ -660,7 +674,7 @@ early_recursion_flag:
 	.long 0
 
 int_msg:
-	.asciz "Unknown interrupt or fault at EIP %p %p %p\n"
+	.asciz "Unknown interrupt or fault at: %p %p %p\n"
 
 fault_msg:
 /* fault info: */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 0e275d49556..54b29bb24e7 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -19,6 +19,7 @@
 #include <asm/msr.h>
 #include <asm/cache.h>
 #include <asm/processor-flags.h>
+#include <asm/percpu.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -226,12 +227,15 @@ ENTRY(secondary_startup_64)
 	movl %eax,%fs
 	movl %eax,%gs
 
-	/*
-	 * Setup up a dummy PDA. this is just for some early bootup code
-	 * that does in_interrupt()
-	 */
+	/* Set up %gs.
+	 *
+	 * The base of %gs always points to the bottom of the irqstack
+	 * union.  If the stack protector canary is enabled, it is
+	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
+	 * init data section till per cpu areas are set up.
+	 */
 	movl	$MSR_GS_BASE,%ecx
-	movq	$empty_zero_page,%rax
+	movq	initial_gs(%rip),%rax
 	movq    %rax,%rdx
 	shrq	$32,%rdx
 	wrmsr
@@ -257,6 +261,8 @@ ENTRY(secondary_startup_64)
 	.align	8
 	ENTRY(initial_code)
 	.quad	x86_64_start_kernel
+	ENTRY(initial_gs)
+	.quad	INIT_PER_CPU_VAR(irq_stack_union)
 	__FINITDATA
 
 	ENTRY(stack_start)
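initial_gs points the %gs base at the bottom of the per-cpu irq stack union described in the new comment, which is what makes the canary land at the fixed %gs:40 that gcc's x86-64 stack protector reads. A sketch of the union this assumes, quoted from memory of this patch series (the real definition is in asm/processor.h; the size constant is illustrative):

	#define IRQ_STACK_SIZE	16384	/* illustrative value */

	union irq_stack_union {
		char irq_stack[IRQ_STACK_SIZE];
		/*
		 * overlay: reserve the first 40 bytes so the canary
		 * sits at the %gs:40 offset gcc hard-codes on x86-64
		 */
		struct {
			char		gs_base[40];
			unsigned long	stack_canary;
		};
	};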
@@ -323,8 +329,6 @@ early_idt_ripmsg:
 #endif /* CONFIG_EARLY_PRINTK */
 	.previous
 
-.balign PAGE_SIZE
-
 #define NEXT_PAGE(name) \
 	.balign	PAGE_SIZE; \
 ENTRY(name)
@@ -401,7 +405,8 @@ NEXT_PAGE(level2_spare_pgt)
 	.globl early_gdt_descr
 early_gdt_descr:
 	.word	GDT_ENTRIES*8-1
-	.quad   per_cpu__gdt_page
+early_gdt_descr_base:
+	.quad	INIT_PER_CPU_VAR(gdt_page)
 
 ENTRY(phys_base)
 	/* This must match the first entry in level2_kernel_pgt */
@@ -412,7 +417,7 @@ ENTRY(phys_base)
 	.section .bss, "aw", @nobits
 	.align L1_CACHE_BYTES
 ENTRY(idt_table)
-	.skip 256 * 16
+	.skip IDT_ENTRIES * 16
 
 	.section .bss.page_aligned, "aw", @nobits
 	.align PAGE_SIZE
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 11d5093eb28..df89102bef8 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -22,7 +22,6 @@
 #include <asm/pgtable.h>
 #include <asm/desc.h>
 #include <asm/apic.h>
-#include <asm/arch_hooks.h>
 #include <asm/i8259.h>
 
 /*
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index b12208f4dfe..99c4d308f16 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -85,19 +85,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 
 	t->io_bitmap_max = bytes;
 
-#ifdef CONFIG_X86_32
-	/*
-	 * Sets the lazy trigger so that the next I/O operation will
-	 * reload the correct bitmap.
-	 * Reset the owner so that a process switch will not set
-	 * tss->io_bitmap_base to IO_BITMAP_OFFSET.
-	 */
-	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
-	tss->io_bitmap_owner = NULL;
-#else
 	/* Update the TSS: */
 	memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
-#endif
 
 	put_cpu();
 
@@ -131,9 +120,8 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_X86_32
-asmlinkage long sys_iopl(unsigned long regsp)
+long sys_iopl(struct pt_regs *regs)
 {
-	struct pt_regs *regs = (struct pt_regs *)&regsp;
 	unsigned int level = regs->bx;
 	struct thread_struct *t = &current->thread;
 	int rc;
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
deleted file mode 100644
index 285bbf8831f..00000000000
--- a/arch/x86/kernel/ipi.c
+++ /dev/null
@@ -1,190 +0,0 @@
1#include <linux/cpumask.h>
2#include <linux/interrupt.h>
3#include <linux/init.h>
4
5#include <linux/mm.h>
6#include <linux/delay.h>
7#include <linux/spinlock.h>
8#include <linux/kernel_stat.h>
9#include <linux/mc146818rtc.h>
10#include <linux/cache.h>
11#include <linux/cpu.h>
12#include <linux/module.h>
13
14#include <asm/smp.h>
15#include <asm/mtrr.h>
16#include <asm/tlbflush.h>
17#include <asm/mmu_context.h>
18#include <asm/apic.h>
19#include <asm/proto.h>
20
21#ifdef CONFIG_X86_32
22#include <mach_apic.h>
23#include <mach_ipi.h>
24
25/*
26 * the following functions deal with sending IPIs between CPUs.
27 *
28 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
29 */
30
31static inline int __prepare_ICR(unsigned int shortcut, int vector)
32{
33 unsigned int icr = shortcut | APIC_DEST_LOGICAL;
34
35 switch (vector) {
36 default:
37 icr |= APIC_DM_FIXED | vector;
38 break;
39 case NMI_VECTOR:
40 icr |= APIC_DM_NMI;
41 break;
42 }
43 return icr;
44}
45
46static inline int __prepare_ICR2(unsigned int mask)
47{
48 return SET_APIC_DEST_FIELD(mask);
49}
50
51void __send_IPI_shortcut(unsigned int shortcut, int vector)
52{
53 /*
54 * Subtle. In the case of the 'never do double writes' workaround
55 * we have to lock out interrupts to be safe. As we don't care
56 * of the value read we use an atomic rmw access to avoid costly
57 * cli/sti. Otherwise we use an even cheaper single atomic write
58 * to the APIC.
59 */
60 unsigned int cfg;
61
62 /*
63 * Wait for idle.
64 */
65 apic_wait_icr_idle();
66
67 /*
68 * No need to touch the target chip field
69 */
70 cfg = __prepare_ICR(shortcut, vector);
71
72 /*
73 * Send the IPI. The write to APIC_ICR fires this off.
74 */
75 apic_write(APIC_ICR, cfg);
76}
77
78void send_IPI_self(int vector)
79{
80 __send_IPI_shortcut(APIC_DEST_SELF, vector);
81}
82
83/*
84 * This is used to send an IPI with no shorthand notation (the destination is
85 * specified in bits 56 to 63 of the ICR).
86 */
87static inline void __send_IPI_dest_field(unsigned long mask, int vector)
88{
89 unsigned long cfg;
90
91 /*
92 * Wait for idle.
93 */
94 if (unlikely(vector == NMI_VECTOR))
95 safe_apic_wait_icr_idle();
96 else
97 apic_wait_icr_idle();
98
99 /*
100 * prepare target chip field
101 */
102 cfg = __prepare_ICR2(mask);
103 apic_write(APIC_ICR2, cfg);
104
105 /*
106 * program the ICR
107 */
108 cfg = __prepare_ICR(0, vector);
109
110 /*
111 * Send the IPI. The write to APIC_ICR fires this off.
112 */
113 apic_write(APIC_ICR, cfg);
114}
115
116/*
117 * This is only used on smaller machines.
118 */
119void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
120{
121 unsigned long mask = cpumask_bits(cpumask)[0];
122 unsigned long flags;
123
124 local_irq_save(flags);
125 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
126 __send_IPI_dest_field(mask, vector);
127 local_irq_restore(flags);
128}
129
130void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
131{
132 unsigned long flags;
133 unsigned int query_cpu;
134
135 /*
136 * Hack. The clustered APIC addressing mode doesn't allow us to send
 137 * to an arbitrary mask, so I do unicasts to each CPU instead. This
138 * should be modified to do 1 message per cluster ID - mbligh
139 */
140
141 local_irq_save(flags);
142 for_each_cpu(query_cpu, mask)
143 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
144 local_irq_restore(flags);
145}
146
147void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
148{
149 unsigned long flags;
150 unsigned int query_cpu;
151 unsigned int this_cpu = smp_processor_id();
152
153 /* See Hack comment above */
154
155 local_irq_save(flags);
156 for_each_cpu(query_cpu, mask)
157 if (query_cpu != this_cpu)
158 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
159 vector);
160 local_irq_restore(flags);
161}
162
163/* must come after the send_IPI functions above for inlining */
164static int convert_apicid_to_cpu(int apic_id)
165{
166 int i;
167
168 for_each_possible_cpu(i) {
169 if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
170 return i;
171 }
172 return -1;
173}
174
175int safe_smp_processor_id(void)
176{
177 int apicid, cpuid;
178
179 if (!boot_cpu_has(X86_FEATURE_APIC))
180 return 0;
181
182 apicid = hard_smp_processor_id();
183 if (apicid == BAD_APICID)
184 return 0;
185
186 cpuid = convert_apicid_to_cpu(apicid);
187
188 return cpuid >= 0 ? cpuid : 0;
189}
190#endif
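
The helpers in the file deleted above encode an IPI as a single ICR write: destination mode, delivery mode and vector are OR-ed together, with an NMI signalled purely by its delivery mode rather than a vector number. A self-contained sketch of that encoding (the constants mirror the apicdef.h values of the era, but treat them as illustrative):

#include <stdio.h>

#define APIC_DEST_LOGICAL	0x00800	/* logical destination mode */
#define APIC_DM_FIXED		0x00000	/* fixed delivery: vector is used */
#define APIC_DM_NMI		0x00400	/* NMI delivery: vector is ignored */
#define NMI_VECTOR		0x02

static unsigned int prepare_icr(unsigned int shortcut, int vector)
{
	unsigned int icr = shortcut | APIC_DEST_LOGICAL;

	if (vector == NMI_VECTOR)
		icr |= APIC_DM_NMI;
	else
		icr |= APIC_DM_FIXED | vector;
	return icr;
}

int main(void)
{
	/* A fixed IPI carries its vector in the low byte ... */
	printf("fixed, vector 0xfd: ICR = %#x\n", prepare_icr(0, 0xfd));
	/* ... an NMI is identified by delivery mode alone. */
	printf("NMI:                ICR = %#x\n", prepare_icr(0, NMI_VECTOR));
	return 0;
}
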
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3973e2df7f8..b8ac3b6cf77 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -6,13 +6,18 @@
6#include <linux/kernel_stat.h> 6#include <linux/kernel_stat.h>
7#include <linux/seq_file.h> 7#include <linux/seq_file.h>
8#include <linux/smp.h> 8#include <linux/smp.h>
9#include <linux/ftrace.h>
9 10
10#include <asm/apic.h> 11#include <asm/apic.h>
11#include <asm/io_apic.h> 12#include <asm/io_apic.h>
12#include <asm/irq.h> 13#include <asm/irq.h>
14#include <asm/idle.h>
13 15
14atomic_t irq_err_count; 16atomic_t irq_err_count;
15 17
18/* Function pointer for generic interrupt vector handling */
19void (*generic_interrupt_extension)(void) = NULL;
20
16/* 21/*
17 * 'what should we do if we get a hw irq event on an illegal vector'. 22 * 'what should we do if we get a hw irq event on an illegal vector'.
18 * each architecture has to answer this themselves. 23 * each architecture has to answer this themselves.
@@ -36,63 +41,65 @@ void ack_bad_irq(unsigned int irq)
36#endif 41#endif
37} 42}
38 43
39#ifdef CONFIG_X86_32 44#define irq_stats(x) (&per_cpu(irq_stat, x))
40# define irq_stats(x) (&per_cpu(irq_stat, x))
41#else
42# define irq_stats(x) cpu_pda(x)
43#endif
44/* 45/*
45 * /proc/interrupts printing: 46 * /proc/interrupts printing:
46 */ 47 */
47static int show_other_interrupts(struct seq_file *p) 48static int show_other_interrupts(struct seq_file *p, int prec)
48{ 49{
49 int j; 50 int j;
50 51
51 seq_printf(p, "NMI: "); 52 seq_printf(p, "%*s: ", prec, "NMI");
52 for_each_online_cpu(j) 53 for_each_online_cpu(j)
53 seq_printf(p, "%10u ", irq_stats(j)->__nmi_count); 54 seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
54 seq_printf(p, " Non-maskable interrupts\n"); 55 seq_printf(p, " Non-maskable interrupts\n");
55#ifdef CONFIG_X86_LOCAL_APIC 56#ifdef CONFIG_X86_LOCAL_APIC
56 seq_printf(p, "LOC: "); 57 seq_printf(p, "%*s: ", prec, "LOC");
57 for_each_online_cpu(j) 58 for_each_online_cpu(j)
58 seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); 59 seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
59 seq_printf(p, " Local timer interrupts\n"); 60 seq_printf(p, " Local timer interrupts\n");
60#endif 61#endif
62 if (generic_interrupt_extension) {
63 seq_printf(p, "PLT: ");
64 for_each_online_cpu(j)
65 seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
66 seq_printf(p, " Platform interrupts\n");
67 }
61#ifdef CONFIG_SMP 68#ifdef CONFIG_SMP
62 seq_printf(p, "RES: "); 69 seq_printf(p, "%*s: ", prec, "RES");
63 for_each_online_cpu(j) 70 for_each_online_cpu(j)
64 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); 71 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
65 seq_printf(p, " Rescheduling interrupts\n"); 72 seq_printf(p, " Rescheduling interrupts\n");
66 seq_printf(p, "CAL: "); 73 seq_printf(p, "%*s: ", prec, "CAL");
67 for_each_online_cpu(j) 74 for_each_online_cpu(j)
68 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); 75 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
69 seq_printf(p, " Function call interrupts\n"); 76 seq_printf(p, " Function call interrupts\n");
70 seq_printf(p, "TLB: "); 77 seq_printf(p, "%*s: ", prec, "TLB");
71 for_each_online_cpu(j) 78 for_each_online_cpu(j)
72 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); 79 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
73 seq_printf(p, " TLB shootdowns\n"); 80 seq_printf(p, " TLB shootdowns\n");
74#endif 81#endif
75#ifdef CONFIG_X86_MCE 82#ifdef CONFIG_X86_MCE
76 seq_printf(p, "TRM: "); 83 seq_printf(p, "%*s: ", prec, "TRM");
77 for_each_online_cpu(j) 84 for_each_online_cpu(j)
78 seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); 85 seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
79 seq_printf(p, " Thermal event interrupts\n"); 86 seq_printf(p, " Thermal event interrupts\n");
80# ifdef CONFIG_X86_64 87# ifdef CONFIG_X86_64
81 seq_printf(p, "THR: "); 88 seq_printf(p, "%*s: ", prec, "THR");
82 for_each_online_cpu(j) 89 for_each_online_cpu(j)
83 seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); 90 seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
84 seq_printf(p, " Threshold APIC interrupts\n"); 91 seq_printf(p, " Threshold APIC interrupts\n");
85# endif 92# endif
86#endif 93#endif
87#ifdef CONFIG_X86_LOCAL_APIC 94#ifdef CONFIG_X86_LOCAL_APIC
88 seq_printf(p, "SPU: "); 95 seq_printf(p, "%*s: ", prec, "SPU");
89 for_each_online_cpu(j) 96 for_each_online_cpu(j)
90 seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); 97 seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
91 seq_printf(p, " Spurious interrupts\n"); 98 seq_printf(p, " Spurious interrupts\n");
92#endif 99#endif
93 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 100 seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
94#if defined(CONFIG_X86_IO_APIC) 101#if defined(CONFIG_X86_IO_APIC)
95 seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); 102 seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
96#endif 103#endif
97 return 0; 104 return 0;
98} 105}
@@ -100,19 +107,22 @@ static int show_other_interrupts(struct seq_file *p)
100int show_interrupts(struct seq_file *p, void *v) 107int show_interrupts(struct seq_file *p, void *v)
101{ 108{
102 unsigned long flags, any_count = 0; 109 unsigned long flags, any_count = 0;
103 int i = *(loff_t *) v, j; 110 int i = *(loff_t *) v, j, prec;
104 struct irqaction *action; 111 struct irqaction *action;
105 struct irq_desc *desc; 112 struct irq_desc *desc;
106 113
107 if (i > nr_irqs) 114 if (i > nr_irqs)
108 return 0; 115 return 0;
109 116
117 for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
118 j *= 10;
119
110 if (i == nr_irqs) 120 if (i == nr_irqs)
111 return show_other_interrupts(p); 121 return show_other_interrupts(p, prec);
112 122
113 /* print header */ 123 /* print header */
114 if (i == 0) { 124 if (i == 0) {
115 seq_printf(p, " "); 125 seq_printf(p, "%*s", prec + 8, "");
116 for_each_online_cpu(j) 126 for_each_online_cpu(j)
117 seq_printf(p, "CPU%-8d", j); 127 seq_printf(p, "CPU%-8d", j);
118 seq_putc(p, '\n'); 128 seq_putc(p, '\n');
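
The new prec parameter sizes the label column to the widest IRQ number (never fewer than three digits), and the "%*s"/"%*d" conversions right-align every /proc/interrupts row to that width. A runnable sketch of the same computation and formatting (nr_irqs value is hypothetical):

#include <stdio.h>

int main(void)
{
	int nr_irqs = 4352;	/* hypothetical large machine */
	int prec, j;

	/* Same loop as above: grow prec while 10^prec still fits nr_irqs. */
	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;

	printf("%*d:  %10u  some device\n", prec, 4351, 1234u);
	printf("%*s:  %10u  Non-maskable interrupts\n", prec, "NMI", 42u);
	return 0;
}

With nr_irqs = 4352 the loop settles on prec = 4, so "NMI" and "4351" line up in the same four-character column.
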
@@ -133,7 +143,7 @@ int show_interrupts(struct seq_file *p, void *v)
133 if (!action && !any_count) 143 if (!action && !any_count)
134 goto out; 144 goto out;
135 145
136 seq_printf(p, "%3d: ", i); 146 seq_printf(p, "%*d: ", prec, i);
137#ifndef CONFIG_SMP 147#ifndef CONFIG_SMP
138 seq_printf(p, "%10u ", kstat_irqs(i)); 148 seq_printf(p, "%10u ", kstat_irqs(i));
139#else 149#else
@@ -165,6 +175,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
165#ifdef CONFIG_X86_LOCAL_APIC 175#ifdef CONFIG_X86_LOCAL_APIC
166 sum += irq_stats(cpu)->apic_timer_irqs; 176 sum += irq_stats(cpu)->apic_timer_irqs;
167#endif 177#endif
178 if (generic_interrupt_extension)
179 sum += irq_stats(cpu)->generic_irqs;
168#ifdef CONFIG_SMP 180#ifdef CONFIG_SMP
169 sum += irq_stats(cpu)->irq_resched_count; 181 sum += irq_stats(cpu)->irq_resched_count;
170 sum += irq_stats(cpu)->irq_call_count; 182 sum += irq_stats(cpu)->irq_call_count;
@@ -192,4 +204,63 @@ u64 arch_irq_stat(void)
192 return sum; 204 return sum;
193} 205}
194 206
207
208/*
209 * do_IRQ handles all normal device IRQ's (the special
210 * SMP cross-CPU interrupts have their own specific
211 * handlers).
212 */
213unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
214{
215 struct pt_regs *old_regs = set_irq_regs(regs);
216
217 /* high bit used in ret_from_ code */
218 unsigned vector = ~regs->orig_ax;
219 unsigned irq;
220
221 exit_idle();
222 irq_enter();
223
224 irq = __get_cpu_var(vector_irq)[vector];
225
226 if (!handle_irq(irq, regs)) {
227#ifdef CONFIG_X86_64
228 if (!disable_apic)
229 ack_APIC_irq();
230#endif
231
232 if (printk_ratelimit())
233 printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n",
234 __func__, smp_processor_id(), vector, irq);
235 }
236
237 irq_exit();
238
239 set_irq_regs(old_regs);
240 return 1;
241}
242
243/*
244 * Handler for GENERIC_INTERRUPT_VECTOR.
245 */
246void smp_generic_interrupt(struct pt_regs *regs)
247{
248 struct pt_regs *old_regs = set_irq_regs(regs);
249
250 ack_APIC_irq();
251
252 exit_idle();
253
254 irq_enter();
255
256 inc_irq_stat(generic_irqs);
257
258 if (generic_interrupt_extension)
259 generic_interrupt_extension();
260
261 irq_exit();
262
263 set_irq_regs(old_regs);
264}
265
195EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); 266EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
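
With do_IRQ() now shared between 32- and 64-bit, the per-arch difference shrinks to bool handle_irq(): it returns false when no descriptor exists for the vector, letting the common path ack the APIC and rate-limit a warning. A stripped-down, self-contained sketch of that contract (not kernel code; the table and handlers are mocks):

#include <stdbool.h>
#include <stdio.h>

#define NR_VECTORS 256

static int vector_irq[NR_VECTORS];	/* vector -> irq, -1 when unused */

/* Arch-specific half: report whether the irq had a handler. */
static bool handle_irq(int irq)
{
	if (irq < 0)
		return false;
	printf("handled irq %d\n", irq);
	return true;
}

/* Common half: translate the vector, then defer to handle_irq(). */
static unsigned int do_IRQ(unsigned int vector)
{
	int irq = vector_irq[vector];

	if (!handle_irq(irq))
		fprintf(stderr, "no irq handler for vector %u (irq %d)\n",
			vector, irq);
	return 1;
}

int main(void)
{
	int i;

	for (i = 0; i < NR_VECTORS; i++)
		vector_irq[i] = -1;
	vector_irq[0x31] = 9;

	do_IRQ(0x31);	/* handled */
	do_IRQ(0x32);	/* warns: nothing registered */
	return 0;
}
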
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 74b9ff7341e..3b09634a515 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -16,6 +16,7 @@
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19#include <linux/percpu.h>
19 20
20#include <asm/apic.h> 21#include <asm/apic.h>
21 22
@@ -55,13 +56,13 @@ static inline void print_stack_overflow(void) { }
55union irq_ctx { 56union irq_ctx {
56 struct thread_info tinfo; 57 struct thread_info tinfo;
57 u32 stack[THREAD_SIZE/sizeof(u32)]; 58 u32 stack[THREAD_SIZE/sizeof(u32)];
58}; 59} __attribute__((aligned(PAGE_SIZE)));
59 60
60static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; 61static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
61static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; 62static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
62 63
63static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; 64static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack);
64static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; 65static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack);
65 66
66static void call_on_stack(void *func, void *stack) 67static void call_on_stack(void *func, void *stack)
67{ 68{
@@ -81,7 +82,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
81 u32 *isp, arg1, arg2; 82 u32 *isp, arg1, arg2;
82 83
83 curctx = (union irq_ctx *) current_thread_info(); 84 curctx = (union irq_ctx *) current_thread_info();
84 irqctx = hardirq_ctx[smp_processor_id()]; 85 irqctx = __get_cpu_var(hardirq_ctx);
85 86
86 /* 87 /*
87 * this is where we switch to the IRQ stack. However, if we are 88 * this is where we switch to the IRQ stack. However, if we are
@@ -125,34 +126,34 @@ void __cpuinit irq_ctx_init(int cpu)
125{ 126{
126 union irq_ctx *irqctx; 127 union irq_ctx *irqctx;
127 128
128 if (hardirq_ctx[cpu]) 129 if (per_cpu(hardirq_ctx, cpu))
129 return; 130 return;
130 131
131 irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE]; 132 irqctx = &per_cpu(hardirq_stack, cpu);
132 irqctx->tinfo.task = NULL; 133 irqctx->tinfo.task = NULL;
133 irqctx->tinfo.exec_domain = NULL; 134 irqctx->tinfo.exec_domain = NULL;
134 irqctx->tinfo.cpu = cpu; 135 irqctx->tinfo.cpu = cpu;
135 irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; 136 irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
136 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); 137 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
137 138
138 hardirq_ctx[cpu] = irqctx; 139 per_cpu(hardirq_ctx, cpu) = irqctx;
139 140
140 irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE]; 141 irqctx = &per_cpu(softirq_stack, cpu);
141 irqctx->tinfo.task = NULL; 142 irqctx->tinfo.task = NULL;
142 irqctx->tinfo.exec_domain = NULL; 143 irqctx->tinfo.exec_domain = NULL;
143 irqctx->tinfo.cpu = cpu; 144 irqctx->tinfo.cpu = cpu;
144 irqctx->tinfo.preempt_count = 0; 145 irqctx->tinfo.preempt_count = 0;
145 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); 146 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
146 147
147 softirq_ctx[cpu] = irqctx; 148 per_cpu(softirq_ctx, cpu) = irqctx;
148 149
149 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", 150 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
150 cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); 151 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
151} 152}
152 153
153void irq_ctx_exit(int cpu) 154void irq_ctx_exit(int cpu)
154{ 155{
155 hardirq_ctx[cpu] = NULL; 156 per_cpu(hardirq_ctx, cpu) = NULL;
156} 157}
157 158
158asmlinkage void do_softirq(void) 159asmlinkage void do_softirq(void)
@@ -169,7 +170,7 @@ asmlinkage void do_softirq(void)
169 170
170 if (local_softirq_pending()) { 171 if (local_softirq_pending()) {
171 curctx = current_thread_info(); 172 curctx = current_thread_info();
172 irqctx = softirq_ctx[smp_processor_id()]; 173 irqctx = __get_cpu_var(softirq_ctx);
173 irqctx->tinfo.task = curctx->task; 174 irqctx->tinfo.task = curctx->task;
174 irqctx->tinfo.previous_esp = current_stack_pointer; 175 irqctx->tinfo.previous_esp = current_stack_pointer;
175 176
@@ -191,33 +192,16 @@ static inline int
191execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; } 192execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
192#endif 193#endif
193 194
194/* 195bool handle_irq(unsigned irq, struct pt_regs *regs)
195 * do_IRQ handles all normal device IRQ's (the special
196 * SMP cross-CPU interrupts have their own specific
197 * handlers).
198 */
199unsigned int do_IRQ(struct pt_regs *regs)
200{ 196{
201 struct pt_regs *old_regs;
202 /* high bit used in ret_from_ code */
203 int overflow;
204 unsigned vector = ~regs->orig_ax;
205 struct irq_desc *desc; 197 struct irq_desc *desc;
206 unsigned irq; 198 int overflow;
207
208
209 old_regs = set_irq_regs(regs);
210 irq_enter();
211 irq = __get_cpu_var(vector_irq)[vector];
212 199
213 overflow = check_stack_overflow(); 200 overflow = check_stack_overflow();
214 201
215 desc = irq_to_desc(irq); 202 desc = irq_to_desc(irq);
216 if (unlikely(!desc)) { 203 if (unlikely(!desc))
217 printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n", 204 return false;
218 __func__, irq, vector, smp_processor_id());
219 BUG();
220 }
221 205
222 if (!execute_on_irq_stack(overflow, desc, irq)) { 206 if (!execute_on_irq_stack(overflow, desc, irq)) {
223 if (unlikely(overflow)) 207 if (unlikely(overflow))
@@ -225,13 +209,10 @@ unsigned int do_IRQ(struct pt_regs *regs)
225 desc->handle_irq(irq, desc); 209 desc->handle_irq(irq, desc);
226 } 210 }
227 211
228 irq_exit(); 212 return true;
229 set_irq_regs(old_regs);
230 return 1;
231} 213}
232 214
233#ifdef CONFIG_HOTPLUG_CPU 215#ifdef CONFIG_HOTPLUG_CPU
234#include <mach_apic.h>
235 216
236/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ 217/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
237void fixup_irqs(void) 218void fixup_irqs(void)
@@ -248,7 +229,7 @@ void fixup_irqs(void)
248 if (irq == 2) 229 if (irq == 2)
249 continue; 230 continue;
250 231
251 affinity = &desc->affinity; 232 affinity = desc->affinity;
252 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { 233 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
253 printk("Breaking affinity for irq %i\n", irq); 234 printk("Breaking affinity for irq %i\n", irq);
254 affinity = cpu_all_mask; 235 affinity = cpu_all_mask;
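
irq_32.c swaps its NR_CPUS-indexed stack arrays for per-CPU variables, and the irq_ctx union itself is now page-aligned so each per-CPU interrupt stack starts on a page boundary. A userspace approximation of the aligned union (sizes are illustrative, and a plain static stands in for DEFINE_PER_CPU_PAGE_ALIGNED):

#include <stdio.h>

#define PAGE_SIZE	4096
#define THREAD_SIZE	(2 * PAGE_SIZE)

struct thread_info_stub {	/* bookkeeping lives at the stack's base */
	void *task;
	int cpu;
	int preempt_count;
};

union irq_ctx {
	struct thread_info_stub	tinfo;
	unsigned int		stack[THREAD_SIZE / sizeof(unsigned int)];
} __attribute__((aligned(PAGE_SIZE)));

static union irq_ctx hardirq_stack;	/* one instance per CPU in the kernel */

int main(void)
{
	unsigned long base = (unsigned long)&hardirq_stack;

	printf("stack base %#lx, page aligned: %s\n",
	       base, (base % PAGE_SIZE) ? "no" : "yes");
	return 0;
}
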
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 63c88e6ec02..977d8b43a0d 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -18,6 +18,13 @@
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <asm/io_apic.h> 19#include <asm/io_apic.h>
20#include <asm/idle.h> 20#include <asm/idle.h>
21#include <asm/apic.h>
22
23DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
24EXPORT_PER_CPU_SYMBOL(irq_stat);
25
26DEFINE_PER_CPU(struct pt_regs *, irq_regs);
27EXPORT_PER_CPU_SYMBOL(irq_regs);
21 28
22/* 29/*
23 * Probabilistic stack overflow check: 30 * Probabilistic stack overflow check:
@@ -41,42 +48,18 @@ static inline void stack_overflow_check(struct pt_regs *regs)
41#endif 48#endif
42} 49}
43 50
44/* 51bool handle_irq(unsigned irq, struct pt_regs *regs)
45 * do_IRQ handles all normal device IRQ's (the special
46 * SMP cross-CPU interrupts have their own specific
47 * handlers).
48 */
49asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
50{ 52{
51 struct pt_regs *old_regs = set_irq_regs(regs);
52 struct irq_desc *desc; 53 struct irq_desc *desc;
53 54
54 /* high bit used in ret_from_ code */
55 unsigned vector = ~regs->orig_ax;
56 unsigned irq;
57
58 exit_idle();
59 irq_enter();
60 irq = __get_cpu_var(vector_irq)[vector];
61
62 stack_overflow_check(regs); 55 stack_overflow_check(regs);
63 56
64 desc = irq_to_desc(irq); 57 desc = irq_to_desc(irq);
65 if (likely(desc)) 58 if (unlikely(!desc))
66 generic_handle_irq_desc(irq, desc); 59 return false;
67 else {
68 if (!disable_apic)
69 ack_APIC_irq();
70
71 if (printk_ratelimit())
72 printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
73 __func__, smp_processor_id(), vector);
74 }
75
76 irq_exit();
77 60
78 set_irq_regs(old_regs); 61 generic_handle_irq_desc(irq, desc);
79 return 1; 62 return true;
80} 63}
81 64
82#ifdef CONFIG_HOTPLUG_CPU 65#ifdef CONFIG_HOTPLUG_CPU
@@ -100,7 +83,7 @@ void fixup_irqs(void)
100 /* interrupt's are disabled at this point */ 83 /* interrupt's are disabled at this point */
101 spin_lock(&desc->lock); 84 spin_lock(&desc->lock);
102 85
103 affinity = &desc->affinity; 86 affinity = desc->affinity;
104 if (!irq_has_action(irq) || 87 if (!irq_has_action(irq) ||
105 cpumask_equal(affinity, cpu_online_mask)) { 88 cpumask_equal(affinity, cpu_online_mask)) {
106 spin_unlock(&desc->lock); 89 spin_unlock(&desc->lock);
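
The 64-bit handle_irq() keeps the probabilistic overflow check: on each interrupt it compares the interrupted stack pointer against the low end of the current stack plus a small guard margin. A sketch of the idea (margins and layout simplified; this is not the kernel's exact test):

#include <stdio.h>

#define THREAD_SIZE	8192UL
#define GUARD		128UL	/* warn when sp sinks into this margin */

static void stack_overflow_check(unsigned long sp, unsigned long stack_base)
{
	if (sp >= stack_base && sp < stack_base + GUARD)
		fprintf(stderr, "do_IRQ: near stack overflow (sp: %#lx)\n", sp);
}

int main(void)
{
	unsigned long base = 0x100000;

	stack_overflow_check(base + 64, base);			/* warns */
	stack_overflow_check(base + THREAD_SIZE / 2, base);	/* silent */
	return 0;
}

The check is "probabilistic" because it only samples sp at interrupt time; an overflow between interrupts goes unnoticed.
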
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 10a09c2f182..bc132610544 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -18,7 +18,7 @@
18#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19#include <asm/desc.h> 19#include <asm/desc.h>
20#include <asm/apic.h> 20#include <asm/apic.h>
21#include <asm/arch_hooks.h> 21#include <asm/setup.h>
22#include <asm/i8259.h> 22#include <asm/i8259.h>
23#include <asm/traps.h> 23#include <asm/traps.h>
24 24
@@ -78,6 +78,15 @@ void __init init_ISA_irqs(void)
78 } 78 }
79} 79}
80 80
81/*
82 * IRQ2 is cascade interrupt to second interrupt controller
83 */
84static struct irqaction irq2 = {
85 .handler = no_action,
86 .mask = CPU_MASK_NONE,
87 .name = "cascade",
88};
89
81DEFINE_PER_CPU(vector_irq_t, vector_irq) = { 90DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
82 [0 ... IRQ0_VECTOR - 1] = -1, 91 [0 ... IRQ0_VECTOR - 1] = -1,
83 [IRQ0_VECTOR] = 0, 92 [IRQ0_VECTOR] = 0,
@@ -118,8 +127,8 @@ void __init native_init_IRQ(void)
118{ 127{
119 int i; 128 int i;
120 129
121 /* all the set up before the call gates are initialised */ 130 /* Execute any quirks before the call gates are initialised: */
122 pre_intr_init_hook(); 131 x86_quirk_pre_intr_init();
123 132
124 /* 133 /*
125 * Cover the whole vector space, no vector can escape 134 * Cover the whole vector space, no vector can escape
@@ -140,8 +149,15 @@ void __init native_init_IRQ(void)
140 */ 149 */
141 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); 150 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
142 151
143 /* IPI for invalidation */ 152 /* IPIs for invalidation */
144 alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); 153 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
154 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
155 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
156 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
157 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
158 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
159 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
160 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
145 161
146 /* IPI for generic function call */ 162 /* IPI for generic function call */
147 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); 163 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
@@ -159,6 +175,9 @@ void __init native_init_IRQ(void)
159 /* self generated IPI for local APIC timer */ 175 /* self generated IPI for local APIC timer */
160 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); 176 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
161 177
178 /* generic IPI for platform specific use */
179 alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
180
162 /* IPI vectors for APIC spurious and error interrupts */ 181 /* IPI vectors for APIC spurious and error interrupts */
163 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); 182 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
164 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); 183 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
@@ -169,10 +188,14 @@ void __init native_init_IRQ(void)
169 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); 188 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
170#endif 189#endif
171 190
172 /* setup after call gates are initialised (usually add in 191 if (!acpi_ioapic)
173 * the architecture specific gates) 192 setup_irq(2, &irq2);
193
194 /*
195 * Call quirks after call gates are initialised (usually add in
196 * the architecture specific gates):
174 */ 197 */
175 intr_init_hook(); 198 x86_quirk_intr_init();
176 199
177 /* 200 /*
178 * External FPU? Set up irq13 if so, for 201 * External FPU? Set up irq13 if so, for
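
In the hunk above, native_init_IRQ() installs eight TLB-shootdown vectors where there was one, each bound to its own stub. Because every stub is a distinct symbol, the unrolled alloc_intr_gate() calls behave like the hypothetical loop below (mock registration; the vector base value is illustrative, not authoritative):

#include <stdio.h>

#define INVALIDATE_TLB_VECTOR_START	0xf0	/* illustrative */

static void invalidate_interrupt0(void) { }
static void invalidate_interrupt1(void) { }	/* stubs 2..7 look the same */

static void alloc_intr_gate(int vector, void (*handler)(void))
{
	(void)handler;
	printf("IDT vector %#x wired to a shootdown stub\n", vector);
}

int main(void)
{
	void (*stub[])(void) = { invalidate_interrupt0, invalidate_interrupt1 };
	int i;

	for (i = 0; i < 2; i++)		/* the kernel registers all eight */
		alloc_intr_gate(INVALIDATE_TLB_VECTOR_START + i, stub[i]);
	return 0;
}
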
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index da481a1e3f3..c7a49e0ffbf 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -147,6 +147,9 @@ static void __init apic_intr_init(void)
147 /* self generated IPI for local APIC timer */ 147 /* self generated IPI for local APIC timer */
148 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); 148 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
149 149
150 /* generic IPI for platform specific use */
151 alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
152
150 /* IPI vectors for APIC spurious and error interrupts */ 153 /* IPI vectors for APIC spurious and error interrupts */
151 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); 154 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
152 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); 155 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 10435a120d2..eedfaebe106 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -46,7 +46,7 @@
46#include <asm/apicdef.h> 46#include <asm/apicdef.h>
47#include <asm/system.h> 47#include <asm/system.h>
48 48
49#include <mach_ipi.h> 49#include <asm/apic.h>
50 50
51/* 51/*
52 * Put the error code here just in case the user cares: 52 * Put the error code here just in case the user cares:
@@ -347,7 +347,7 @@ void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
347 */ 347 */
348void kgdb_roundup_cpus(unsigned long flags) 348void kgdb_roundup_cpus(unsigned long flags)
349{ 349{
350 send_IPI_allbutself(APIC_DM_NMI); 350 apic->send_IPI_allbutself(APIC_DM_NMI);
351} 351}
352#endif 352#endif
353 353
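
kgdb's direct send_IPI_allbutself() call becomes a method on the probed apic driver, replacing the old mach_ipi.h compile-time binding with a runtime indirection. A minimal mock of that pattern (the struct layout and names here are stand-ins for the kernel's struct apic):

#include <stdio.h>

struct apic_driver {
	const char *name;
	void (*send_IPI_allbutself)(int vector);
};

static void flat_send_IPI_allbutself(int vector)
{
	printf("%s IPI to all other CPUs\n",
	       vector == 0x400 ? "NMI" : "fixed");
}

static struct apic_driver apic_flat = {
	.name			= "flat",
	.send_IPI_allbutself	= flat_send_IPI_allbutself,
};

static struct apic_driver *apic = &apic_flat;	/* chosen at probe time */

int main(void)
{
	apic->send_IPI_allbutself(0x400 /* APIC_DM_NMI, illustrative */);
	return 0;
}
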
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 652fce6d2cc..137f2e8132d 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -19,7 +19,6 @@
19#include <linux/clocksource.h> 19#include <linux/clocksource.h>
20#include <linux/kvm_para.h> 20#include <linux/kvm_para.h>
21#include <asm/pvclock.h> 21#include <asm/pvclock.h>
22#include <asm/arch_hooks.h>
23#include <asm/msr.h> 22#include <asm/msr.h>
24#include <asm/apic.h> 23#include <asm/apic.h>
25#include <linux/percpu.h> 24#include <linux/percpu.h>
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 37f420018a4..e7368c1da01 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -14,12 +14,12 @@
14#include <linux/ftrace.h> 14#include <linux/ftrace.h>
15#include <linux/suspend.h> 15#include <linux/suspend.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/io.h>
17 18
18#include <asm/pgtable.h> 19#include <asm/pgtable.h>
19#include <asm/pgalloc.h> 20#include <asm/pgalloc.h>
20#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
21#include <asm/mmu_context.h> 22#include <asm/mmu_context.h>
22#include <asm/io.h>
23#include <asm/apic.h> 23#include <asm/apic.h>
24#include <asm/cpufeature.h> 24#include <asm/cpufeature.h>
25#include <asm/desc.h> 25#include <asm/desc.h>
@@ -63,7 +63,7 @@ static void load_segments(void)
63 "\tmovl %%eax,%%fs\n" 63 "\tmovl %%eax,%%fs\n"
64 "\tmovl %%eax,%%gs\n" 64 "\tmovl %%eax,%%gs\n"
65 "\tmovl %%eax,%%ss\n" 65 "\tmovl %%eax,%%ss\n"
66 ::: "eax", "memory"); 66 : : : "eax", "memory");
67#undef STR 67#undef STR
68#undef __STR 68#undef __STR
69} 69}
@@ -121,7 +121,7 @@ static void machine_kexec_page_table_set_one(
121static void machine_kexec_prepare_page_tables(struct kimage *image) 121static void machine_kexec_prepare_page_tables(struct kimage *image)
122{ 122{
123 void *control_page; 123 void *control_page;
124 pmd_t *pmd = 0; 124 pmd_t *pmd = NULL;
125 125
126 control_page = page_address(image->control_code_page); 126 control_page = page_address(image->control_code_page);
127#ifdef CONFIG_X86_PAE 127#ifdef CONFIG_X86_PAE
@@ -205,7 +205,8 @@ void machine_kexec(struct kimage *image)
205 205
206 if (image->preserve_context) { 206 if (image->preserve_context) {
207#ifdef CONFIG_X86_IO_APIC 207#ifdef CONFIG_X86_IO_APIC
208 /* We need to put APICs in legacy mode so that we can 208 /*
209 * We need to put APICs in legacy mode so that we can
209 * get timer interrupts in second kernel. kexec/kdump 210 * get timer interrupts in second kernel. kexec/kdump
210 * paths already have calls to disable_IO_APIC() in 211 * paths already have calls to disable_IO_APIC() in
211 * one form or other. kexec jump path also need 212 * one form or other. kexec jump path also need
@@ -227,7 +228,8 @@ void machine_kexec(struct kimage *image)
227 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) 228 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
228 << PAGE_SHIFT); 229 << PAGE_SHIFT);
229 230
230 /* The segment registers are funny things, they have both a 231 /*
232 * The segment registers are funny things, they have both a
231 * visible and an invisible part. Whenever the visible part is 233 * visible and an invisible part. Whenever the visible part is
232 * set to a specific selector, the invisible part is loaded 234 * set to a specific selector, the invisible part is loaded
233 * with from a table in memory. At no other time is the 235 * with from a table in memory. At no other time is the
@@ -237,11 +239,12 @@ void machine_kexec(struct kimage *image)
237 * segments, before I zap the gdt with an invalid value. 239 * segments, before I zap the gdt with an invalid value.
238 */ 240 */
239 load_segments(); 241 load_segments();
240 /* The gdt & idt are now invalid. 242 /*
243 * The gdt & idt are now invalid.
241 * If you want to load them you must set up your own idt & gdt. 244 * If you want to load them you must set up your own idt & gdt.
242 */ 245 */
243 set_gdt(phys_to_virt(0),0); 246 set_gdt(phys_to_virt(0), 0);
244 set_idt(phys_to_virt(0),0); 247 set_idt(phys_to_virt(0), 0);
245 248
246 /* now call it */ 249 /* now call it */
247 image->start = relocate_kernel_ptr((unsigned long)image->head, 250 image->start = relocate_kernel_ptr((unsigned long)image->head,
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index c43caa3a91f..89cea4d4467 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -12,20 +12,47 @@
12#include <linux/reboot.h> 12#include <linux/reboot.h>
13#include <linux/numa.h> 13#include <linux/numa.h>
14#include <linux/ftrace.h> 14#include <linux/ftrace.h>
15#include <linux/io.h>
16#include <linux/suspend.h>
15 17
16#include <asm/pgtable.h> 18#include <asm/pgtable.h>
17#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
18#include <asm/mmu_context.h> 20#include <asm/mmu_context.h>
19#include <asm/io.h>
20 21
21#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE))) 22static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
22static u64 kexec_pgd[512] PAGE_ALIGNED; 23 unsigned long addr)
23static u64 kexec_pud0[512] PAGE_ALIGNED; 24{
24static u64 kexec_pmd0[512] PAGE_ALIGNED; 25 pud_t *pud;
25static u64 kexec_pte0[512] PAGE_ALIGNED; 26 pmd_t *pmd;
26static u64 kexec_pud1[512] PAGE_ALIGNED; 27 struct page *page;
27static u64 kexec_pmd1[512] PAGE_ALIGNED; 28 int result = -ENOMEM;
28static u64 kexec_pte1[512] PAGE_ALIGNED; 29
30 addr &= PMD_MASK;
31 pgd += pgd_index(addr);
32 if (!pgd_present(*pgd)) {
33 page = kimage_alloc_control_pages(image, 0);
34 if (!page)
35 goto out;
36 pud = (pud_t *)page_address(page);
37 memset(pud, 0, PAGE_SIZE);
38 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
39 }
40 pud = pud_offset(pgd, addr);
41 if (!pud_present(*pud)) {
42 page = kimage_alloc_control_pages(image, 0);
43 if (!page)
44 goto out;
45 pmd = (pmd_t *)page_address(page);
46 memset(pmd, 0, PAGE_SIZE);
47 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
48 }
49 pmd = pmd_offset(pud, addr);
50 if (!pmd_present(*pmd))
51 set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
52 result = 0;
53out:
54 return result;
55}
29 56
30static void init_level2_page(pmd_t *level2p, unsigned long addr) 57static void init_level2_page(pmd_t *level2p, unsigned long addr)
31{ 58{
@@ -92,9 +119,8 @@ static int init_level4_page(struct kimage *image, pgd_t *level4p,
92 } 119 }
93 level3p = (pud_t *)page_address(page); 120 level3p = (pud_t *)page_address(page);
94 result = init_level3_page(image, level3p, addr, last_addr); 121 result = init_level3_page(image, level3p, addr, last_addr);
95 if (result) { 122 if (result)
96 goto out; 123 goto out;
97 }
98 set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE)); 124 set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
99 addr += PGDIR_SIZE; 125 addr += PGDIR_SIZE;
100 } 126 }
@@ -107,12 +133,72 @@ out:
107 return result; 133 return result;
108} 134}
109 135
136static void free_transition_pgtable(struct kimage *image)
137{
138 free_page((unsigned long)image->arch.pud);
139 free_page((unsigned long)image->arch.pmd);
140 free_page((unsigned long)image->arch.pte);
141}
142
143static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
144{
145 pud_t *pud;
146 pmd_t *pmd;
147 pte_t *pte;
148 unsigned long vaddr, paddr;
149 int result = -ENOMEM;
150
151 vaddr = (unsigned long)relocate_kernel;
152 paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
153 pgd += pgd_index(vaddr);
154 if (!pgd_present(*pgd)) {
155 pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
156 if (!pud)
157 goto err;
158 image->arch.pud = pud;
159 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
160 }
161 pud = pud_offset(pgd, vaddr);
162 if (!pud_present(*pud)) {
163 pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
164 if (!pmd)
165 goto err;
166 image->arch.pmd = pmd;
167 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
168 }
169 pmd = pmd_offset(pud, vaddr);
170 if (!pmd_present(*pmd)) {
171 pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
172 if (!pte)
173 goto err;
174 image->arch.pte = pte;
175 set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
176 }
177 pte = pte_offset_kernel(pmd, vaddr);
178 set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
179 return 0;
180err:
181 free_transition_pgtable(image);
182 return result;
183}
184
110 185
111static int init_pgtable(struct kimage *image, unsigned long start_pgtable) 186static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
112{ 187{
113 pgd_t *level4p; 188 pgd_t *level4p;
189 int result;
114 level4p = (pgd_t *)__va(start_pgtable); 190 level4p = (pgd_t *)__va(start_pgtable);
115 return init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT); 191 result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
192 if (result)
193 return result;
194 /*
195 * image->start may be outside 0 ~ max_pfn, for example when
196 * jump back to original kernel from kexeced kernel
197 */
198 result = init_one_level2_page(image, level4p, image->start);
199 if (result)
200 return result;
201 return init_transition_pgtable(image, level4p);
116} 202}
117 203
118static void set_idt(void *newidt, u16 limit) 204static void set_idt(void *newidt, u16 limit)
@@ -174,7 +260,7 @@ int machine_kexec_prepare(struct kimage *image)
174 260
175void machine_kexec_cleanup(struct kimage *image) 261void machine_kexec_cleanup(struct kimage *image)
176{ 262{
177 return; 263 free_transition_pgtable(image);
178} 264}
179 265
180/* 266/*
@@ -185,36 +271,45 @@ void machine_kexec(struct kimage *image)
185{ 271{
186 unsigned long page_list[PAGES_NR]; 272 unsigned long page_list[PAGES_NR];
187 void *control_page; 273 void *control_page;
274 int save_ftrace_enabled;
188 275
189 tracer_disable(); 276#ifdef CONFIG_KEXEC_JUMP
277 if (kexec_image->preserve_context)
278 save_processor_state();
279#endif
280
281 save_ftrace_enabled = __ftrace_enabled_save();
190 282
191 /* Interrupts aren't acceptable while we reboot */ 283 /* Interrupts aren't acceptable while we reboot */
192 local_irq_disable(); 284 local_irq_disable();
193 285
286 if (image->preserve_context) {
287#ifdef CONFIG_X86_IO_APIC
288 /*
289 * We need to put APICs in legacy mode so that we can
290 * get timer interrupts in second kernel. kexec/kdump
291 * paths already have calls to disable_IO_APIC() in
292 * one form or other. kexec jump path also need
293 * one.
294 */
295 disable_IO_APIC();
296#endif
297 }
298
194 control_page = page_address(image->control_code_page) + PAGE_SIZE; 299 control_page = page_address(image->control_code_page) + PAGE_SIZE;
195 memcpy(control_page, relocate_kernel, PAGE_SIZE); 300 memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
196 301
197 page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page); 302 page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
198 page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel; 303 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
199 page_list[PA_PGD] = virt_to_phys(&kexec_pgd);
200 page_list[VA_PGD] = (unsigned long)kexec_pgd;
201 page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0);
202 page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
203 page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0);
204 page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
205 page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0);
206 page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
207 page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1);
208 page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
209 page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1);
210 page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
211 page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1);
212 page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
213
214 page_list[PA_TABLE_PAGE] = 304 page_list[PA_TABLE_PAGE] =
215 (unsigned long)__pa(page_address(image->control_code_page)); 305 (unsigned long)__pa(page_address(image->control_code_page));
216 306
217 /* The segment registers are funny things, they have both a 307 if (image->type == KEXEC_TYPE_DEFAULT)
308 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
309 << PAGE_SHIFT);
310
311 /*
312 * The segment registers are funny things, they have both a
218 * visible and an invisible part. Whenever the visible part is 313 * visible and an invisible part. Whenever the visible part is
219 * set to a specific selector, the invisible part is loaded 314 * set to a specific selector, the invisible part is loaded
220 * with from a table in memory. At no other time is the 315 * with from a table in memory. At no other time is the
@@ -224,15 +319,25 @@ void machine_kexec(struct kimage *image)
224 * segments, before I zap the gdt with an invalid value. 319 * segments, before I zap the gdt with an invalid value.
225 */ 320 */
226 load_segments(); 321 load_segments();
227 /* The gdt & idt are now invalid. 322 /*
323 * The gdt & idt are now invalid.
228 * If you want to load them you must set up your own idt & gdt. 324 * If you want to load them you must set up your own idt & gdt.
229 */ 325 */
230 set_gdt(phys_to_virt(0),0); 326 set_gdt(phys_to_virt(0), 0);
231 set_idt(phys_to_virt(0),0); 327 set_idt(phys_to_virt(0), 0);
232 328
233 /* now call it */ 329 /* now call it */
234 relocate_kernel((unsigned long)image->head, (unsigned long)page_list, 330 image->start = relocate_kernel((unsigned long)image->head,
235 image->start); 331 (unsigned long)page_list,
332 image->start,
333 image->preserve_context);
334
335#ifdef CONFIG_KEXEC_JUMP
336 if (kexec_image->preserve_context)
337 restore_processor_state();
338#endif
339
340 __ftrace_enabled_restore(save_ftrace_enabled);
236} 341}
237 342
238void arch_crash_save_vmcoreinfo(void) 343void arch_crash_save_vmcoreinfo(void)
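
Both new helpers above, init_one_level2_page() and init_transition_pgtable(), replace statically sized kexec page tables with the same walk-allocate-descend pattern: test the entry at each paging level, allocate a zeroed table if it is absent, then step down until the final mapping can be written. A userspace sketch of that pattern over a toy three-level table (geometry simplified; calloc stands in for the kernel's page allocators):

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 512

/* Descend one level, allocating the next table if the slot is empty. */
static void **level_down(void **table, unsigned int idx)
{
	if (!table[idx]) {
		table[idx] = calloc(ENTRIES, sizeof(void *));
		if (!table[idx]) {
			perror("calloc");
			exit(1);
		}
	}
	return table[idx];
}

int main(void)
{
	static void *pgd[ENTRIES];
	unsigned long vaddr = 0xffffffff81000000UL;	/* e.g. relocate_kernel */
	void **pud, **pmd;

	pud = level_down(pgd, (vaddr >> 39) & 511);
	pmd = level_down(pud, (vaddr >> 30) & 511);
	pmd[(vaddr >> 21) & 511] = (void *)0x1000UL;	/* final 2MB "mapping" */

	printf("mapped %#lx through freshly allocated tables\n", vaddr);
	return 0;
}

Allocating on demand is what lets machine_kexec_cleanup() free the transition tables instead of carrying seven page-sized static arrays for the life of the kernel.
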
diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c
index 2dc183758be..845d80ce1ef 100644
--- a/arch/x86/kernel/mca_32.c
+++ b/arch/x86/kernel/mca_32.c
@@ -51,7 +51,6 @@
51#include <linux/ioport.h> 51#include <linux/ioport.h>
52#include <asm/uaccess.h> 52#include <asm/uaccess.h>
53#include <linux/init.h> 53#include <linux/init.h>
54#include <asm/arch_hooks.h>
55 54
56static unsigned char which_scsi; 55static unsigned char which_scsi;
57 56
@@ -474,6 +473,4 @@ void __kprobes mca_handle_nmi(void)
474 * adapter was responsible for the error. 473 * adapter was responsible for the error.
475 */ 474 */
476 bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback); 475 bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback);
477 476}
478 mca_nmi_hook();
479} /* mca_handle_nmi */
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index b7f4c929e61..5e9f4fc5138 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -87,9 +87,9 @@
87#include <linux/cpu.h> 87#include <linux/cpu.h>
88#include <linux/firmware.h> 88#include <linux/firmware.h>
89#include <linux/platform_device.h> 89#include <linux/platform_device.h>
90#include <linux/uaccess.h>
90 91
91#include <asm/msr.h> 92#include <asm/msr.h>
92#include <asm/uaccess.h>
93#include <asm/processor.h> 93#include <asm/processor.h>
94#include <asm/microcode.h> 94#include <asm/microcode.h>
95 95
@@ -196,7 +196,7 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
196 return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1; 196 return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1;
197} 197}
198 198
199static inline int 199static inline int
200update_match_revision(struct microcode_header_intel *mc_header, int rev) 200update_match_revision(struct microcode_header_intel *mc_header, int rev)
201{ 201{
202 return (mc_header->rev <= rev) ? 0 : 1; 202 return (mc_header->rev <= rev) ? 0 : 1;
@@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device)
442 return ret; 442 return ret;
443 } 443 }
444 444
445 ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size, 445 ret = generic_load_microcode(cpu, (void *)firmware->data,
446 &get_ucode_fw); 446 firmware->size, &get_ucode_fw);
447 447
448 release_firmware(firmware); 448 release_firmware(firmware);
449 449
@@ -460,7 +460,7 @@ static int request_microcode_user(int cpu, const void __user *buf, size_t size)
460 /* We should bind the task to the CPU */ 460 /* We should bind the task to the CPU */
461 BUG_ON(cpu != raw_smp_processor_id()); 461 BUG_ON(cpu != raw_smp_processor_id());
462 462
463 return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user); 463 return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
464} 464}
465 465
466static void microcode_fini_cpu(int cpu) 466static void microcode_fini_cpu(int cpu)
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 666e43df51f..712d15fdc41 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -226,7 +226,7 @@ static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
226 return 0; 226 return 0;
227} 227}
228 228
229static struct dmi_system_id __devinitdata mmconf_dmi_table[] = { 229static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
230 { 230 {
231 .callback = set_check_enable_amd_mmconf, 231 .callback = set_check_enable_amd_mmconf,
232 .ident = "Sun Microsystems Machine", 232 .ident = "Sun Microsystems Machine",
diff --git a/arch/x86/kernel/module_32.c b/arch/x86/kernel/module_32.c
index 3db0a5442eb..0edd819050e 100644
--- a/arch/x86/kernel/module_32.c
+++ b/arch/x86/kernel/module_32.c
@@ -42,7 +42,7 @@ void module_free(struct module *mod, void *module_region)
42{ 42{
43 vfree(module_region); 43 vfree(module_region);
44 /* FIXME: If module_region == mod->init_region, trim exception 44 /* FIXME: If module_region == mod->init_region, trim exception
45 table entries. */ 45 table entries. */
46} 46}
47 47
48/* We don't need anything special. */ 48/* We don't need anything special. */
@@ -113,13 +113,13 @@ int module_finalize(const Elf_Ehdr *hdr,
113 *para = NULL; 113 *para = NULL;
114 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; 114 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
115 115
116 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 116 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
117 if (!strcmp(".text", secstrings + s->sh_name)) 117 if (!strcmp(".text", secstrings + s->sh_name))
118 text = s; 118 text = s;
119 if (!strcmp(".altinstructions", secstrings + s->sh_name)) 119 if (!strcmp(".altinstructions", secstrings + s->sh_name))
120 alt = s; 120 alt = s;
121 if (!strcmp(".smp_locks", secstrings + s->sh_name)) 121 if (!strcmp(".smp_locks", secstrings + s->sh_name))
122 locks= s; 122 locks = s;
123 if (!strcmp(".parainstructions", secstrings + s->sh_name)) 123 if (!strcmp(".parainstructions", secstrings + s->sh_name))
124 para = s; 124 para = s;
125 } 125 }
diff --git a/arch/x86/kernel/module_64.c b/arch/x86/kernel/module_64.c
index 6ba87830d4b..c23880b90b5 100644
--- a/arch/x86/kernel/module_64.c
+++ b/arch/x86/kernel/module_64.c
@@ -30,14 +30,14 @@
30#include <asm/page.h> 30#include <asm/page.h>
31#include <asm/pgtable.h> 31#include <asm/pgtable.h>
32 32
33#define DEBUGP(fmt...) 33#define DEBUGP(fmt...)
34 34
35#ifndef CONFIG_UML 35#ifndef CONFIG_UML
36void module_free(struct module *mod, void *module_region) 36void module_free(struct module *mod, void *module_region)
37{ 37{
38 vfree(module_region); 38 vfree(module_region);
39 /* FIXME: If module_region == mod->init_region, trim exception 39 /* FIXME: If module_region == mod->init_region, trim exception
40 table entries. */ 40 table entries. */
41} 41}
42 42
43void *module_alloc(unsigned long size) 43void *module_alloc(unsigned long size)
@@ -77,7 +77,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
77 Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; 77 Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
78 Elf64_Sym *sym; 78 Elf64_Sym *sym;
79 void *loc; 79 void *loc;
80 u64 val; 80 u64 val;
81 81
82 DEBUGP("Applying relocate section %u to %u\n", relsec, 82 DEBUGP("Applying relocate section %u to %u\n", relsec,
83 sechdrs[relsec].sh_info); 83 sechdrs[relsec].sh_info);
@@ -91,11 +91,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
91 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr 91 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
92 + ELF64_R_SYM(rel[i].r_info); 92 + ELF64_R_SYM(rel[i].r_info);
93 93
94 DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n", 94 DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
95 (int)ELF64_R_TYPE(rel[i].r_info), 95 (int)ELF64_R_TYPE(rel[i].r_info),
96 sym->st_value, rel[i].r_addend, (u64)loc); 96 sym->st_value, rel[i].r_addend, (u64)loc);
97 97
98 val = sym->st_value + rel[i].r_addend; 98 val = sym->st_value + rel[i].r_addend;
99 99
100 switch (ELF64_R_TYPE(rel[i].r_info)) { 100 switch (ELF64_R_TYPE(rel[i].r_info)) {
101 case R_X86_64_NONE: 101 case R_X86_64_NONE:
@@ -113,16 +113,16 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
113 if ((s64)val != *(s32 *)loc) 113 if ((s64)val != *(s32 *)loc)
114 goto overflow; 114 goto overflow;
115 break; 115 break;
116 case R_X86_64_PC32: 116 case R_X86_64_PC32:
117 val -= (u64)loc; 117 val -= (u64)loc;
118 *(u32 *)loc = val; 118 *(u32 *)loc = val;
119#if 0 119#if 0
120 if ((s64)val != *(s32 *)loc) 120 if ((s64)val != *(s32 *)loc)
121 goto overflow; 121 goto overflow;
122#endif 122#endif
123 break; 123 break;
124 default: 124 default:
125 printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n", 125 printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
126 me->name, ELF64_R_TYPE(rel[i].r_info)); 126 me->name, ELF64_R_TYPE(rel[i].r_info));
127 return -ENOEXEC; 127 return -ENOEXEC;
128 } 128 }
@@ -130,7 +130,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
130 return 0; 130 return 0;
131 131
132overflow: 132overflow:
133 printk(KERN_ERR "overflow in relocation type %d val %Lx\n", 133 printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
134 (int)ELF64_R_TYPE(rel[i].r_info), val); 134 (int)ELF64_R_TYPE(rel[i].r_info), val);
135 printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n", 135 printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
136 me->name); 136 me->name);
@@ -143,13 +143,13 @@ int apply_relocate(Elf_Shdr *sechdrs,
143 unsigned int relsec, 143 unsigned int relsec,
144 struct module *me) 144 struct module *me)
145{ 145{
146 printk("non add relocation not supported\n"); 146 printk(KERN_ERR "non add relocation not supported\n");
147 return -ENOSYS; 147 return -ENOSYS;
148} 148}
149 149
150int module_finalize(const Elf_Ehdr *hdr, 150int module_finalize(const Elf_Ehdr *hdr,
151 const Elf_Shdr *sechdrs, 151 const Elf_Shdr *sechdrs,
152 struct module *me) 152 struct module *me)
153{ 153{
154 const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL, 154 const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
155 *para = NULL; 155 *para = NULL;
@@ -161,7 +161,7 @@ int module_finalize(const Elf_Ehdr *hdr,
161 if (!strcmp(".altinstructions", secstrings + s->sh_name)) 161 if (!strcmp(".altinstructions", secstrings + s->sh_name))
162 alt = s; 162 alt = s;
163 if (!strcmp(".smp_locks", secstrings + s->sh_name)) 163 if (!strcmp(".smp_locks", secstrings + s->sh_name))
164 locks= s; 164 locks = s;
165 if (!strcmp(".parainstructions", secstrings + s->sh_name)) 165 if (!strcmp(".parainstructions", secstrings + s->sh_name))
166 para = s; 166 para = s;
167 } 167 }
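
The printk fix in this file swaps the nonstandard %Lu length modifier for %llu when printing the 64-bit relocation type, which is what gcc's format checking accepts. The same distinction in plain C:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t rtype = 11;	/* e.g. an unknown ELF64 relocation type */

	/* %llu is the standard modifier for unsigned long long ... */
	printf("Unknown rela relocation: %llu\n", (unsigned long long)rtype);
	/* ... and PRIu64 picks the right modifier for uint64_t portably. */
	printf("Unknown rela relocation: %" PRIu64 "\n", rtype);
	return 0;
}
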
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index a649a4ccad4..47673e02ae5 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -3,7 +3,7 @@
3 * compliant MP-table parsing routines. 3 * compliant MP-table parsing routines.
4 * 4 *
5 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> 5 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> 6 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
7 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de> 7 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
8 */ 8 */
9 9
@@ -29,12 +29,7 @@
29#include <asm/setup.h> 29#include <asm/setup.h>
30#include <asm/smp.h> 30#include <asm/smp.h>
31 31
32#include <mach_apic.h> 32#include <asm/apic.h>
33#ifdef CONFIG_X86_32
34#include <mach_apicdef.h>
35#include <mach_mpparse.h>
36#endif
37
38/* 33/*
39 * Checksum an MP configuration block. 34 * Checksum an MP configuration block.
40 */ 35 */
@@ -144,11 +139,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
144 if (bad_ioapic(m->apicaddr)) 139 if (bad_ioapic(m->apicaddr))
145 return; 140 return;
146 141
147 mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr; 142 mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
148 mp_ioapics[nr_ioapics].mp_apicid = m->apicid; 143 mp_ioapics[nr_ioapics].apicid = m->apicid;
149 mp_ioapics[nr_ioapics].mp_type = m->type; 144 mp_ioapics[nr_ioapics].type = m->type;
150 mp_ioapics[nr_ioapics].mp_apicver = m->apicver; 145 mp_ioapics[nr_ioapics].apicver = m->apicver;
151 mp_ioapics[nr_ioapics].mp_flags = m->flags; 146 mp_ioapics[nr_ioapics].flags = m->flags;
152 nr_ioapics++; 147 nr_ioapics++;
153} 148}
154 149
@@ -160,55 +155,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m)
160 m->srcbusirq, m->dstapic, m->dstirq); 155 m->srcbusirq, m->dstapic, m->dstirq);
161} 156}
162 157
163static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) 158static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
164{ 159{
165 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," 160 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
166 " IRQ %02x, APIC ID %x, APIC INT %02x\n", 161 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
167 mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3, 162 mp_irq->irqtype, mp_irq->irqflag & 3,
168 (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus, 163 (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
169 mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq); 164 mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
170} 165}
171 166
172static void __init assign_to_mp_irq(struct mpc_intsrc *m, 167static void __init assign_to_mp_irq(struct mpc_intsrc *m,
173 struct mp_config_intsrc *mp_irq) 168 struct mpc_intsrc *mp_irq)
174{ 169{
175 mp_irq->mp_dstapic = m->dstapic; 170 mp_irq->dstapic = m->dstapic;
176 mp_irq->mp_type = m->type; 171 mp_irq->type = m->type;
177 mp_irq->mp_irqtype = m->irqtype; 172 mp_irq->irqtype = m->irqtype;
178 mp_irq->mp_irqflag = m->irqflag; 173 mp_irq->irqflag = m->irqflag;
179 mp_irq->mp_srcbus = m->srcbus; 174 mp_irq->srcbus = m->srcbus;
180 mp_irq->mp_srcbusirq = m->srcbusirq; 175 mp_irq->srcbusirq = m->srcbusirq;
181 mp_irq->mp_dstirq = m->dstirq; 176 mp_irq->dstirq = m->dstirq;
182} 177}
183 178
184static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq, 179static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
185 struct mpc_intsrc *m) 180 struct mpc_intsrc *m)
186{ 181{
187 m->dstapic = mp_irq->mp_dstapic; 182 m->dstapic = mp_irq->dstapic;
188 m->type = mp_irq->mp_type; 183 m->type = mp_irq->type;
189 m->irqtype = mp_irq->mp_irqtype; 184 m->irqtype = mp_irq->irqtype;
190 m->irqflag = mp_irq->mp_irqflag; 185 m->irqflag = mp_irq->irqflag;
191 m->srcbus = mp_irq->mp_srcbus; 186 m->srcbus = mp_irq->srcbus;
192 m->srcbusirq = mp_irq->mp_srcbusirq; 187 m->srcbusirq = mp_irq->srcbusirq;
193 m->dstirq = mp_irq->mp_dstirq; 188 m->dstirq = mp_irq->dstirq;
194} 189}
195 190
196static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq, 191static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
197 struct mpc_intsrc *m) 192 struct mpc_intsrc *m)
198{ 193{
199 if (mp_irq->mp_dstapic != m->dstapic) 194 if (mp_irq->dstapic != m->dstapic)
200 return 1; 195 return 1;
201 if (mp_irq->mp_type != m->type) 196 if (mp_irq->type != m->type)
202 return 2; 197 return 2;
203 if (mp_irq->mp_irqtype != m->irqtype) 198 if (mp_irq->irqtype != m->irqtype)
204 return 3; 199 return 3;
205 if (mp_irq->mp_irqflag != m->irqflag) 200 if (mp_irq->irqflag != m->irqflag)
206 return 4; 201 return 4;
207 if (mp_irq->mp_srcbus != m->srcbus) 202 if (mp_irq->srcbus != m->srcbus)
208 return 5; 203 return 5;
209 if (mp_irq->mp_srcbusirq != m->srcbusirq) 204 if (mp_irq->srcbusirq != m->srcbusirq)
210 return 6; 205 return 6;
211 if (mp_irq->mp_dstirq != m->dstirq) 206 if (mp_irq->dstirq != m->dstirq)
212 return 7; 207 return 7;
213 208
214 return 0; 209 return 0;
@@ -292,16 +287,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
292 return 0; 287 return 0;
293 288
294#ifdef CONFIG_X86_32 289#ifdef CONFIG_X86_32
295 /* 290 generic_mps_oem_check(mpc, oem, str);
296 * need to make sure summit and es7000's mps_oem_check is safe to be
297 * called early via genericarch 's mps_oem_check
298 */
299 if (early) {
300#ifdef CONFIG_X86_NUMAQ
301 numaq_mps_oem_check(mpc, oem, str);
302#endif
303 } else
304 mps_oem_check(mpc, oem, str);
305#endif 291#endif
306 /* save the local APIC address, it might be non-default */ 292 /* save the local APIC address, it might be non-default */
307 if (!acpi_lapic) 293 if (!acpi_lapic)
@@ -386,13 +372,13 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
386 (*x86_quirks->mpc_record)++; 372 (*x86_quirks->mpc_record)++;
387 } 373 }
388 374
389#ifdef CONFIG_X86_GENERICARCH 375#ifdef CONFIG_X86_BIGSMP
390 generic_bigsmp_probe(); 376 generic_bigsmp_probe();
391#endif 377#endif
392 378
393#ifdef CONFIG_X86_32 379 if (apic->setup_apic_routing)
394 setup_apic_routing(); 380 apic->setup_apic_routing();
395#endif 381
396 if (!num_processors) 382 if (!num_processors)
397 printk(KERN_ERR "MPTABLE: no processors registered!\n"); 383 printk(KERN_ERR "MPTABLE: no processors registered!\n");
398 return num_processors; 384 return num_processors;
@@ -417,7 +403,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
417 intsrc.type = MP_INTSRC; 403 intsrc.type = MP_INTSRC;
418 intsrc.irqflag = 0; /* conforming */ 404 intsrc.irqflag = 0; /* conforming */
419 intsrc.srcbus = 0; 405 intsrc.srcbus = 0;
420 intsrc.dstapic = mp_ioapics[0].mp_apicid; 406 intsrc.dstapic = mp_ioapics[0].apicid;
421 407
422 intsrc.irqtype = mp_INT; 408 intsrc.irqtype = mp_INT;
423 409
@@ -570,14 +556,27 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
570 } 556 }
571} 557}
572 558
573static struct intel_mp_floating *mpf_found; 559static struct mpf_intel *mpf_found;
560
561static unsigned long __init get_mpc_size(unsigned long physptr)
562{
563 struct mpc_table *mpc;
564 unsigned long size;
565
566 mpc = early_ioremap(physptr, PAGE_SIZE);
567 size = mpc->length;
568 early_iounmap(mpc, PAGE_SIZE);
569 apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);
570
571 return size;
572}
574 573
575/* 574/*
576 * Scan the memory blocks for an SMP configuration block. 575 * Scan the memory blocks for an SMP configuration block.
577 */ 576 */
578static void __init __get_smp_config(unsigned int early) 577static void __init __get_smp_config(unsigned int early)
579{ 578{
580 struct intel_mp_floating *mpf = mpf_found; 579 struct mpf_intel *mpf = mpf_found;
581 580
582 if (!mpf) 581 if (!mpf)
583 return; 582 return;
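
The new get_mpc_size() above establishes the pattern used through the rest of this hunk: map one page of the MP config table just to read its length field, unmap, then remap with the true size before parsing. A self-contained sketch with mock map/unmap calls standing in for early_ioremap()/early_iounmap():

#include <stdio.h>
#include <stddef.h>

struct mpc_table {
	char signature[4];
	unsigned short length;	/* total table size in bytes */
};

/* Mocks: identity "mappings" in place of early_ioremap()/early_iounmap(). */
static void *mock_ioremap(void *phys, size_t size) { (void)size; return phys; }
static void mock_iounmap(void *virt, size_t size) { (void)virt; (void)size; }

static size_t get_mpc_size(void *physptr)
{
	struct mpc_table *mpc = mock_ioremap(physptr, 4096);
	size_t size = mpc->length;

	mock_iounmap(mpc, 4096);
	return size;
}

int main(void)
{
	struct mpc_table t = { {'P', 'C', 'M', 'P'}, 1234 };
	size_t size = get_mpc_size(&t);			/* first map: header only */
	struct mpc_table *mpc = mock_ioremap(&t, size);	/* second: full table */

	printf("mpc: %zu bytes\n", size);
	mock_iounmap(mpc, size);
	return 0;
}

Measuring first matters because phys_to_virt() is no longer used: the table may live above the direct mapping, so only an explicit, correctly sized early mapping is safe.
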
@@ -598,9 +597,9 @@ static void __init __get_smp_config(unsigned int early)
598 } 597 }
599 598
600 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", 599 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
601 mpf->mpf_specification); 600 mpf->specification);
602#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) 601#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
603 if (mpf->mpf_feature2 & (1 << 7)) { 602 if (mpf->feature2 & (1 << 7)) {
604 printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); 603 printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
605 pic_mode = 1; 604 pic_mode = 1;
606 } else { 605 } else {
@@ -611,7 +610,7 @@ static void __init __get_smp_config(unsigned int early)
611 /* 610 /*
612 * Now see if we need to read further. 611 * Now see if we need to read further.
613 */ 612 */
614 if (mpf->mpf_feature1 != 0) { 613 if (mpf->feature1 != 0) {
615 if (early) { 614 if (early) {
616 /* 615 /*
617 * local APIC has default address 616 * local APIC has default address
@@ -621,16 +620,20 @@ static void __init __get_smp_config(unsigned int early)
621 } 620 }
622 621
623 printk(KERN_INFO "Default MP configuration #%d\n", 622 printk(KERN_INFO "Default MP configuration #%d\n",
624 mpf->mpf_feature1); 623 mpf->feature1);
625 construct_default_ISA_mptable(mpf->mpf_feature1); 624 construct_default_ISA_mptable(mpf->feature1);
626 625
627 } else if (mpf->mpf_physptr) { 626 } else if (mpf->physptr) {
627 struct mpc_table *mpc;
628 unsigned long size;
628 629
630 size = get_mpc_size(mpf->physptr);
631 mpc = early_ioremap(mpf->physptr, size);
629 /* 632 /*
630 * Read the physical hardware table. Anything here will 633 * Read the physical hardware table. Anything here will
631 * override the defaults. 634 * override the defaults.
632 */ 635 */
633 if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { 636 if (!smp_read_mpc(mpc, early)) {
634#ifdef CONFIG_X86_LOCAL_APIC 637#ifdef CONFIG_X86_LOCAL_APIC
635 smp_found_config = 0; 638 smp_found_config = 0;
636#endif 639#endif
@@ -638,8 +641,10 @@ static void __init __get_smp_config(unsigned int early)
638 "BIOS bug, MP table errors detected!...\n"); 641 "BIOS bug, MP table errors detected!...\n");
639 printk(KERN_ERR "... disabling SMP support. " 642 printk(KERN_ERR "... disabling SMP support. "
640 "(tell your hw vendor)\n"); 643 "(tell your hw vendor)\n");
644 early_iounmap(mpc, size);
641 return; 645 return;
642 } 646 }
647 early_iounmap(mpc, size);
643 648
644 if (early) 649 if (early)
645 return; 650 return;
@@ -688,33 +693,33 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
688 unsigned reserve) 693 unsigned reserve)
689{ 694{
690 unsigned int *bp = phys_to_virt(base); 695 unsigned int *bp = phys_to_virt(base);
691 struct intel_mp_floating *mpf; 696 struct mpf_intel *mpf;
692 697
693 apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n", 698 apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
694 bp, length); 699 bp, length);
695 BUILD_BUG_ON(sizeof(*mpf) != 16); 700 BUILD_BUG_ON(sizeof(*mpf) != 16);
696 701
697 while (length > 0) { 702 while (length > 0) {
698 mpf = (struct intel_mp_floating *)bp; 703 mpf = (struct mpf_intel *)bp;
699 if ((*bp == SMP_MAGIC_IDENT) && 704 if ((*bp == SMP_MAGIC_IDENT) &&
700 (mpf->mpf_length == 1) && 705 (mpf->length == 1) &&
701 !mpf_checksum((unsigned char *)bp, 16) && 706 !mpf_checksum((unsigned char *)bp, 16) &&
702 ((mpf->mpf_specification == 1) 707 ((mpf->specification == 1)
703 || (mpf->mpf_specification == 4))) { 708 || (mpf->specification == 4))) {
704#ifdef CONFIG_X86_LOCAL_APIC 709#ifdef CONFIG_X86_LOCAL_APIC
705 smp_found_config = 1; 710 smp_found_config = 1;
706#endif 711#endif
707 mpf_found = mpf; 712 mpf_found = mpf;
708 713
709 printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", 714 printk(KERN_INFO "found SMP MP-table at [%p] %llx\n",
710 mpf, virt_to_phys(mpf)); 715 mpf, (u64)virt_to_phys(mpf));
711 716
712 if (!reserve) 717 if (!reserve)
713 return 1; 718 return 1;
714 reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE, 719 reserve_bootmem_generic(virt_to_phys(mpf), sizeof(*mpf),
715 BOOTMEM_DEFAULT); 720 BOOTMEM_DEFAULT);
716 if (mpf->mpf_physptr) { 721 if (mpf->physptr) {
717 unsigned long size = PAGE_SIZE; 722 unsigned long size = get_mpc_size(mpf->physptr);
718#ifdef CONFIG_X86_32 723#ifdef CONFIG_X86_32
719 /* 724 /*
720 * We cannot access to MPC table to compute 725 * We cannot access to MPC table to compute
@@ -722,15 +727,24 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
722 * the bottom is mapped now. 727 * the bottom is mapped now.
 723 * PC-9800 places its MPC table at the very end 728
 724 * of physical memory, so simply reserving 729
725 * PAGE_SIZE from mpg->mpf_physptr yields BUG() 730 * PAGE_SIZE from mpf->physptr yields BUG()
726 * in reserve_bootmem. 731 * in reserve_bootmem.
 732 * also need to make sure physptr is below
 733 * max_low_pfn; we don't need to reserve the
 734 * area above max_low_pfn
727 */ 735 */
728 unsigned long end = max_low_pfn * PAGE_SIZE; 736 unsigned long end = max_low_pfn * PAGE_SIZE;
729 if (mpf->mpf_physptr + size > end) 737
730 size = end - mpf->mpf_physptr; 738 if (mpf->physptr < end) {
731#endif 739 if (mpf->physptr + size > end)
732 reserve_bootmem_generic(mpf->mpf_physptr, size, 740 size = end - mpf->physptr;
741 reserve_bootmem_generic(mpf->physptr, size,
742 BOOTMEM_DEFAULT);
743 }
744#else
745 reserve_bootmem_generic(mpf->physptr, size,
733 BOOTMEM_DEFAULT); 746 BOOTMEM_DEFAULT);
747#endif
734 } 748 }
735 749
736 return 1; 750 return 1;
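For reference, the structure smp_scan_config() hunts for is 16 bytes long, paragraph-aligned, and begins with the ASCII signature "_MP_" (SMP_MAGIC_IDENT read as four bytes). A compressed sketch of the scan, under those assumptions:

	/* hedged sketch of the scan: a valid floating pointer has the
	 * "_MP_" signature, length == 1 paragraph (16 bytes), and all
	 * 16 bytes summing to 0 mod 256 */
	unsigned char *p;
	for (p = start; p < start + length; p += 16) {
		struct mpf_intel *cand = (struct mpf_intel *)p;
		if (!memcmp(p, "_MP_", 4) && cand->length == 1 &&
		    !mpf_checksum(p, 16))
			return cand;	/* found the floating pointer */
	}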
@@ -809,15 +823,15 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
809 /* not legacy */ 823 /* not legacy */
810 824
811 for (i = 0; i < mp_irq_entries; i++) { 825 for (i = 0; i < mp_irq_entries; i++) {
812 if (mp_irqs[i].mp_irqtype != mp_INT) 826 if (mp_irqs[i].irqtype != mp_INT)
813 continue; 827 continue;
814 828
815 if (mp_irqs[i].mp_irqflag != 0x0f) 829 if (mp_irqs[i].irqflag != 0x0f)
816 continue; 830 continue;
817 831
818 if (mp_irqs[i].mp_srcbus != m->srcbus) 832 if (mp_irqs[i].srcbus != m->srcbus)
819 continue; 833 continue;
820 if (mp_irqs[i].mp_srcbusirq != m->srcbusirq) 834 if (mp_irqs[i].srcbusirq != m->srcbusirq)
821 continue; 835 continue;
822 if (irq_used[i]) { 836 if (irq_used[i]) {
823 /* already claimed */ 837 /* already claimed */
@@ -876,12 +890,12 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
876#ifdef CONFIG_X86_IO_APIC 890#ifdef CONFIG_X86_IO_APIC
877 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; 891 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
878 892
879 printk(KERN_INFO "OLD "); 893 apic_printk(APIC_VERBOSE, "OLD ");
880 print_MP_intsrc_info(m); 894 print_MP_intsrc_info(m);
881 i = get_MP_intsrc_index(m); 895 i = get_MP_intsrc_index(m);
882 if (i > 0) { 896 if (i > 0) {
883 assign_to_mpc_intsrc(&mp_irqs[i], m); 897 assign_to_mpc_intsrc(&mp_irqs[i], m);
884 printk(KERN_INFO "NEW "); 898 apic_printk(APIC_VERBOSE, "NEW ");
885 print_mp_irq_info(&mp_irqs[i]); 899 print_mp_irq_info(&mp_irqs[i]);
886 } else if (!i) { 900 } else if (!i) {
887 /* legacy, do nothing */ 901 /* legacy, do nothing */
@@ -922,14 +936,14 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
922 if (irq_used[i]) 936 if (irq_used[i])
923 continue; 937 continue;
924 938
925 if (mp_irqs[i].mp_irqtype != mp_INT) 939 if (mp_irqs[i].irqtype != mp_INT)
926 continue; 940 continue;
927 941
928 if (mp_irqs[i].mp_irqflag != 0x0f) 942 if (mp_irqs[i].irqflag != 0x0f)
929 continue; 943 continue;
930 944
931 if (nr_m_spare > 0) { 945 if (nr_m_spare > 0) {
932 printk(KERN_INFO "*NEW* found "); 946 apic_printk(APIC_VERBOSE, "*NEW* found\n");
933 nr_m_spare--; 947 nr_m_spare--;
934 assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]); 948 assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]);
935 m_spare[nr_m_spare] = NULL; 949 m_spare[nr_m_spare] = NULL;
@@ -1001,7 +1015,7 @@ static int __init update_mp_table(void)
1001{ 1015{
1002 char str[16]; 1016 char str[16];
1003 char oem[10]; 1017 char oem[10];
1004 struct intel_mp_floating *mpf; 1018 struct mpf_intel *mpf;
1005 struct mpc_table *mpc, *mpc_new; 1019 struct mpc_table *mpc, *mpc_new;
1006 1020
1007 if (!enable_update_mptable) 1021 if (!enable_update_mptable)
@@ -1014,19 +1028,19 @@ static int __init update_mp_table(void)
1014 /* 1028 /*
1015 * Now see if we need to go further. 1029 * Now see if we need to go further.
1016 */ 1030 */
1017 if (mpf->mpf_feature1 != 0) 1031 if (mpf->feature1 != 0)
1018 return 0; 1032 return 0;
1019 1033
1020 if (!mpf->mpf_physptr) 1034 if (!mpf->physptr)
1021 return 0; 1035 return 0;
1022 1036
1023 mpc = phys_to_virt(mpf->mpf_physptr); 1037 mpc = phys_to_virt(mpf->physptr);
1024 1038
1025 if (!smp_check_mpc(mpc, oem, str)) 1039 if (!smp_check_mpc(mpc, oem, str))
1026 return 0; 1040 return 0;
1027 1041
1028 printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf)); 1042 printk(KERN_INFO "mpf: %llx\n", (u64)virt_to_phys(mpf));
1029 printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr); 1043 printk(KERN_INFO "physptr: %x\n", mpf->physptr);
1030 1044
1031 if (mpc_new_phys && mpc->length > mpc_new_length) { 1045 if (mpc_new_phys && mpc->length > mpc_new_length) {
1032 mpc_new_phys = 0; 1046 mpc_new_phys = 0;
@@ -1047,23 +1061,23 @@ static int __init update_mp_table(void)
1047 } 1061 }
1048 printk(KERN_INFO "use in-position replacing\n"); 1062
1049 } else { 1063 } else {
1050 mpf->mpf_physptr = mpc_new_phys; 1064 mpf->physptr = mpc_new_phys;
1051 mpc_new = phys_to_virt(mpc_new_phys); 1065 mpc_new = phys_to_virt(mpc_new_phys);
1052 memcpy(mpc_new, mpc, mpc->length); 1066 memcpy(mpc_new, mpc, mpc->length);
1053 mpc = mpc_new; 1067 mpc = mpc_new;
1054 /* check if we can modify that */ 1068 /* check if we can modify that */
1055 if (mpc_new_phys - mpf->mpf_physptr) { 1069 if (mpc_new_phys - mpf->physptr) {
1056 struct intel_mp_floating *mpf_new; 1070 struct mpf_intel *mpf_new;
1057 /* steal 16 bytes from [0, 1k) */ 1071 /* steal 16 bytes from [0, 1k) */
1058 printk(KERN_INFO "mpf new: %x\n", 0x400 - 16); 1072 printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
1059 mpf_new = phys_to_virt(0x400 - 16); 1073 mpf_new = phys_to_virt(0x400 - 16);
1060 memcpy(mpf_new, mpf, 16); 1074 memcpy(mpf_new, mpf, 16);
1061 mpf = mpf_new; 1075 mpf = mpf_new;
1062 mpf->mpf_physptr = mpc_new_phys; 1076 mpf->physptr = mpc_new_phys;
1063 } 1077 }
1064 mpf->mpf_checksum = 0; 1078 mpf->checksum = 0;
1065 mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16); 1079 mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
1066 printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr); 1080 printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
1067 } 1081 }
1068 1082
1069 /* 1083 /*
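The two checksum statements near the end of update_mp_table() restore the MP-spec invariant that all 16 bytes of the floating structure sum to zero mod 256; zeroing the field first excludes the stale value from the sum:

	/* sketch of the invariant: let s = sum of the other 15 bytes;
	 * storing (0 - s) mod 256 makes the whole 16-byte sum zero */
	mpf->checksum = 0;
	mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
	/* afterwards: mpf_checksum((unsigned char *)mpf, 16) == 0 */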
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 726266695b2..3cf3413ec62 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -35,10 +35,10 @@
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/cpu.h> 36#include <linux/cpu.h>
37#include <linux/notifier.h> 37#include <linux/notifier.h>
38#include <linux/uaccess.h>
38 39
39#include <asm/processor.h> 40#include <asm/processor.h>
40#include <asm/msr.h> 41#include <asm/msr.h>
41#include <asm/uaccess.h>
42#include <asm/system.h> 42#include <asm/system.h>
43 43
44static struct class *msr_class; 44static struct class *msr_class;
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
deleted file mode 100644
index f2191d4f271..00000000000
--- a/arch/x86/kernel/numaq_32.c
+++ /dev/null
@@ -1,293 +0,0 @@
1/*
2 * Written by: Patricia Gaughen, IBM Corporation
3 *
4 * Copyright (C) 2002, IBM Corp.
5 *
6 * All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
16 * NON INFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 * Send feedback to <gone@us.ibm.com>
24 */
25
26#include <linux/mm.h>
27#include <linux/bootmem.h>
28#include <linux/mmzone.h>
29#include <linux/module.h>
30#include <linux/nodemask.h>
31#include <asm/numaq.h>
32#include <asm/topology.h>
33#include <asm/processor.h>
34#include <asm/genapic.h>
35#include <asm/e820.h>
36#include <asm/setup.h>
37
38#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))
39
40/*
41 * Function: smp_dump_qct()
42 *
43 * Description: gets memory layout from the quad config table. This
44 * function also updates node_online_map with the nodes (quads) present.
45 */
46static void __init smp_dump_qct(void)
47{
48 int node;
49 struct eachquadmem *eq;
50 struct sys_cfg_data *scd =
51 (struct sys_cfg_data *)__va(SYS_CFG_DATA_PRIV_ADDR);
52
53 nodes_clear(node_online_map);
54 for_each_node(node) {
55 if (scd->quads_present31_0 & (1 << node)) {
56 node_set_online(node);
57 eq = &scd->eq[node];
58 /* Convert to pages */
59 node_start_pfn[node] = MB_TO_PAGES(
60 eq->hi_shrd_mem_start - eq->priv_mem_size);
61 node_end_pfn[node] = MB_TO_PAGES(
62 eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);
63
64 e820_register_active_regions(node, node_start_pfn[node],
65 node_end_pfn[node]);
66 memory_present(node,
67 node_start_pfn[node], node_end_pfn[node]);
68 node_remap_size[node] = node_memmap_size_bytes(node,
69 node_start_pfn[node],
70 node_end_pfn[node]);
71 }
72 }
73}
74
75
76void __cpuinit numaq_tsc_disable(void)
77{
78 if (!found_numaq)
79 return;
80
81 if (num_online_nodes() > 1) {
82 printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
83 setup_clear_cpu_cap(X86_FEATURE_TSC);
84 }
85}
86
87static int __init numaq_pre_time_init(void)
88{
89 numaq_tsc_disable();
90 return 0;
91}
92
93int found_numaq;
94/*
95 * Have to match translation table entries to main table entries by counter
96 * hence the mpc_record variable .... can't see a less disgusting way of
97 * doing this ....
98 */
99struct mpc_config_translation {
100 unsigned char mpc_type;
101 unsigned char trans_len;
102 unsigned char trans_type;
103 unsigned char trans_quad;
104 unsigned char trans_global;
105 unsigned char trans_local;
106 unsigned short trans_reserved;
107};
108
109/* x86_quirks member */
110static int mpc_record;
111static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY]
112 __cpuinitdata;
113
114static inline int generate_logical_apicid(int quad, int phys_apicid)
115{
116 return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
117}
118
119/* x86_quirks member */
120static int mpc_apic_id(struct mpc_cpu *m)
121{
122 int quad = translation_table[mpc_record]->trans_quad;
123 int logical_apicid = generate_logical_apicid(quad, m->apicid);
124
125 printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
126 m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
127 (m->cpufeature & CPU_MODEL_MASK) >> 4,
128 m->apicver, quad, logical_apicid);
129 return logical_apicid;
130}
131
132int mp_bus_id_to_node[MAX_MP_BUSSES];
133
134int mp_bus_id_to_local[MAX_MP_BUSSES];
135
136/* x86_quirks member */
137static void mpc_oem_bus_info(struct mpc_bus *m, char *name)
138{
139 int quad = translation_table[mpc_record]->trans_quad;
140 int local = translation_table[mpc_record]->trans_local;
141
142 mp_bus_id_to_node[m->busid] = quad;
143 mp_bus_id_to_local[m->busid] = local;
144 printk(KERN_INFO "Bus #%d is %s (node %d)\n",
145 m->busid, name, quad);
146}
147
148int quad_local_to_mp_bus_id [NR_CPUS/4][4];
149
150/* x86_quirks member */
151static void mpc_oem_pci_bus(struct mpc_bus *m)
152{
153 int quad = translation_table[mpc_record]->trans_quad;
154 int local = translation_table[mpc_record]->trans_local;
155
156 quad_local_to_mp_bus_id[quad][local] = m->busid;
157}
158
159static void __init MP_translation_info(struct mpc_config_translation *m)
160{
161 printk(KERN_INFO
162 "Translation: record %d, type %d, quad %d, global %d, local %d\n",
163 mpc_record, m->trans_type, m->trans_quad, m->trans_global,
164 m->trans_local);
165
166 if (mpc_record >= MAX_MPC_ENTRY)
167 printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
168 else
169 translation_table[mpc_record] = m; /* stash this for later */
170 if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
171 node_set_online(m->trans_quad);
172}
173
174static int __init mpf_checksum(unsigned char *mp, int len)
175{
176 int sum = 0;
177
178 while (len--)
179 sum += *mp++;
180
181 return sum & 0xFF;
182}
183
184/*
185 * Read/parse the MPC oem tables
186 */
187
188static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable,
189 unsigned short oemsize)
190{
191 int count = sizeof(*oemtable); /* the header size */
192 unsigned char *oemptr = ((unsigned char *)oemtable) + count;
193
194 mpc_record = 0;
195 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n",
196 oemtable);
197 if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) {
198 printk(KERN_WARNING
199 "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
200 oemtable->signature[0], oemtable->signature[1],
201 oemtable->signature[2], oemtable->signature[3]);
202 return;
203 }
204 if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) {
205 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
206 return;
207 }
208 while (count < oemtable->length) {
209 switch (*oemptr) {
210 case MP_TRANSLATION:
211 {
212 struct mpc_config_translation *m =
213 (struct mpc_config_translation *)oemptr;
214 MP_translation_info(m);
215 oemptr += sizeof(*m);
216 count += sizeof(*m);
217 ++mpc_record;
218 break;
219 }
220 default:
221 {
222 printk(KERN_WARNING
223 "Unrecognised OEM table entry type! - %d\n",
224 (int)*oemptr);
225 return;
226 }
227 }
228 }
229}
230
231static int __init numaq_setup_ioapic_ids(void)
232{
233 /* so can skip it */
234 return 1;
235}
236
237static int __init numaq_update_genapic(void)
238{
239 genapic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
240
241 return 0;
242}
243
244static struct x86_quirks numaq_x86_quirks __initdata = {
245 .arch_pre_time_init = numaq_pre_time_init,
246 .arch_time_init = NULL,
247 .arch_pre_intr_init = NULL,
248 .arch_memory_setup = NULL,
249 .arch_intr_init = NULL,
250 .arch_trap_init = NULL,
251 .mach_get_smp_config = NULL,
252 .mach_find_smp_config = NULL,
253 .mpc_record = &mpc_record,
254 .mpc_apic_id = mpc_apic_id,
255 .mpc_oem_bus_info = mpc_oem_bus_info,
256 .mpc_oem_pci_bus = mpc_oem_pci_bus,
257 .smp_read_mpc_oem = smp_read_mpc_oem,
258 .setup_ioapic_ids = numaq_setup_ioapic_ids,
259 .update_genapic = numaq_update_genapic,
260};
261
262void numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
263{
264 if (strncmp(oem, "IBM NUMA", 8))
265 printk("Warning! Not a NUMA-Q system!\n");
266 else
267 found_numaq = 1;
268}
269
270static __init void early_check_numaq(void)
271{
272 /*
273 * Find possible boot-time SMP configuration:
274 */
275 early_find_smp_config();
276 /*
277 * get boot-time SMP configuration:
278 */
279 if (smp_found_config)
280 early_get_smp_config();
281
282 if (found_numaq)
283 x86_quirks = &numaq_x86_quirks;
284}
285
286int __init get_memcfg_numaq(void)
287{
288 early_check_numaq();
289 if (!found_numaq)
290 return 0;
291 smp_dump_qct();
292 return 1;
293}
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 95777b0faa7..3a7c5a44082 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = {
26}; 26};
27EXPORT_SYMBOL(pv_lock_ops); 27EXPORT_SYMBOL(pv_lock_ops);
28 28
29void __init paravirt_use_bytelocks(void)
30{
31#ifdef CONFIG_SMP
32 pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
33 pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
34 pv_lock_ops.spin_lock = __byte_spin_lock;
35 pv_lock_ops.spin_trylock = __byte_spin_trylock;
36 pv_lock_ops.spin_unlock = __byte_spin_unlock;
37#endif
38}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c6520a4e85d..63dd358d8ee 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -28,7 +28,6 @@
28#include <asm/paravirt.h> 28#include <asm/paravirt.h>
29#include <asm/desc.h> 29#include <asm/desc.h>
30#include <asm/setup.h> 30#include <asm/setup.h>
31#include <asm/arch_hooks.h>
32#include <asm/pgtable.h> 31#include <asm/pgtable.h>
33#include <asm/time.h> 32#include <asm/time.h>
34#include <asm/pgalloc.h> 33#include <asm/pgalloc.h>
@@ -44,6 +43,17 @@ void _paravirt_nop(void)
44{ 43{
45} 44}
46 45
46/* identity function, which can be inlined */
47u32 _paravirt_ident_32(u32 x)
48{
49 return x;
50}
51
52u64 _paravirt_ident_64(u64 x)
53{
54 return x;
55}
56
47static void __init default_banner(void) 57static void __init default_banner(void)
48{ 58{
49 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 59 printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -138,9 +148,16 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
138 if (opfunc == NULL) 148 if (opfunc == NULL)
139 /* If there's no function, patch it with a ud2a (BUG) */ 149 /* If there's no function, patch it with a ud2a (BUG) */
140 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); 150 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
141 else if (opfunc == paravirt_nop) 151 else if (opfunc == _paravirt_nop)
142 /* If the operation is a nop, then nop the callsite */ 152 /* If the operation is a nop, then nop the callsite */
143 ret = paravirt_patch_nop(); 153 ret = paravirt_patch_nop();
154
155 /* identity functions just return their single argument */
156 else if (opfunc == _paravirt_ident_32)
157 ret = paravirt_patch_ident_32(insnbuf, len);
158 else if (opfunc == _paravirt_ident_64)
159 ret = paravirt_patch_ident_64(insnbuf, len);
160
144 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || 161 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
145 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) || 162 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
146 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) || 163 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
@@ -318,10 +335,10 @@ struct pv_time_ops pv_time_ops = {
318 335
319struct pv_irq_ops pv_irq_ops = { 336struct pv_irq_ops pv_irq_ops = {
320 .init_IRQ = native_init_IRQ, 337 .init_IRQ = native_init_IRQ,
321 .save_fl = native_save_fl, 338 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
322 .restore_fl = native_restore_fl, 339 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
323 .irq_disable = native_irq_disable, 340 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
324 .irq_enable = native_irq_enable, 341 .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
325 .safe_halt = native_safe_halt, 342 .safe_halt = native_safe_halt,
326 .halt = native_halt, 343 .halt = native_halt,
327#ifdef CONFIG_X86_64 344#ifdef CONFIG_X86_64
@@ -399,6 +416,14 @@ struct pv_apic_ops pv_apic_ops = {
399#endif 416#endif
400}; 417};
401 418
419#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
420/* 32-bit pagetable entries */
421#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
422#else
423/* 64-bit pagetable entries */
424#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
425#endif
426
402struct pv_mmu_ops pv_mmu_ops = { 427struct pv_mmu_ops pv_mmu_ops = {
403#ifndef CONFIG_X86_64 428#ifndef CONFIG_X86_64
404 .pagetable_setup_start = native_pagetable_setup_start, 429 .pagetable_setup_start = native_pagetable_setup_start,
@@ -450,22 +475,23 @@ struct pv_mmu_ops pv_mmu_ops = {
450 .pmd_clear = native_pmd_clear, 475 .pmd_clear = native_pmd_clear,
451#endif 476#endif
452 .set_pud = native_set_pud, 477 .set_pud = native_set_pud,
453 .pmd_val = native_pmd_val, 478
454 .make_pmd = native_make_pmd, 479 .pmd_val = PTE_IDENT,
480 .make_pmd = PTE_IDENT,
455 481
456#if PAGETABLE_LEVELS == 4 482#if PAGETABLE_LEVELS == 4
457 .pud_val = native_pud_val, 483 .pud_val = PTE_IDENT,
458 .make_pud = native_make_pud, 484 .make_pud = PTE_IDENT,
485
459 .set_pgd = native_set_pgd, 486 .set_pgd = native_set_pgd,
460#endif 487#endif
461#endif /* PAGETABLE_LEVELS >= 3 */ 488#endif /* PAGETABLE_LEVELS >= 3 */
462 489
463 .pte_val = native_pte_val, 490 .pte_val = PTE_IDENT,
464 .pte_flags = native_pte_flags, 491 .pgd_val = PTE_IDENT,
465 .pgd_val = native_pgd_val,
466 492
467 .make_pte = native_make_pte, 493 .make_pte = PTE_IDENT,
468 .make_pgd = native_make_pgd, 494 .make_pgd = PTE_IDENT,
469 495
470 .dup_mmap = paravirt_nop, 496 .dup_mmap = paravirt_nop,
471 .exit_mmap = paravirt_nop, 497 .exit_mmap = paravirt_nop,
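The PTE_IDENT selection above tracks pte width, not pointer width: a 32-bit non-PAE kernel has 32-bit ptes, while PAE and 64-bit kernels carry 64-bit entries. A hedged illustration, with pteval_sketch_t as a hypothetical stand-in for pteval_t:

	#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
	typedef u32 pteval_sketch_t;	/* fits one register: _paravirt_ident_32 */
	#else
	typedef u64 pteval_sketch_t;	/* %edx:%eax or %rax: _paravirt_ident_64 */
	#endif

	/* native pte_val()/make_pte() reduce to this identity */
	static pteval_sketch_t pte_ident_sketch(pteval_sketch_t v)
	{
		return v;
	}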
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 9fe644f4861..d9f32e6d6ab 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -12,6 +12,18 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
12DEF_NATIVE(pv_cpu_ops, clts, "clts"); 12DEF_NATIVE(pv_cpu_ops, clts, "clts");
13DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc"); 13DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
14 14
15unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
16{
17 /* arg in %eax, return in %eax */
18 return 0;
19}
20
21unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
22{
23 /* arg in %edx:%eax, return in %edx:%eax */
24 return 0;
25}
26
15unsigned native_patch(u8 type, u16 clobbers, void *ibuf, 27unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
16 unsigned long addr, unsigned len) 28 unsigned long addr, unsigned len)
17{ 29{
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 061d01df9ae..3f08f34f93e 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -19,6 +19,21 @@ DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
19DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl"); 19DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
20DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs"); 20DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
21 21
22DEF_NATIVE(, mov32, "mov %edi, %eax");
23DEF_NATIVE(, mov64, "mov %rdi, %rax");
24
25unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
26{
27 return paravirt_patch_insns(insnbuf, len,
28 start__mov32, end__mov32);
29}
30
31unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
32{
33 return paravirt_patch_insns(insnbuf, len,
34 start__mov64, end__mov64);
35}
36
22unsigned native_patch(u8 type, u16 clobbers, void *ibuf, 37unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
23 unsigned long addr, unsigned len) 38 unsigned long addr, unsigned len)
24{ 39{
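On 64-bit the identity templates are a single mov (%edi -> %eax or %rdi -> %rax); on 32-bit they return 0 because the argument already sits in the return register, so no bytes are needed at all. The contract both variants rely on from paravirt_patch_insns() can be sketched as follows (a simplification of the real helper):

	/* hedged sketch: splice the native template over the call site
	 * when it fits, otherwise keep the slot for the indirect call */
	static unsigned patch_insns_sketch(void *insnbuf, unsigned len,
					   const char *start, const char *end)
	{
		unsigned insn_len = end - start;	/* e.g. "mov %rdi,%rax" */

		if (start == NULL || insn_len > len)
			return len;		/* doesn't fit: leave it alone */
		memcpy(insnbuf, start, insn_len);	/* inline the native code */
		return insn_len;
	}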
diff --git a/arch/x86/kernel/probe_roms_32.c b/arch/x86/kernel/probe_roms_32.c
index 675a48c404a..071e7fea42e 100644
--- a/arch/x86/kernel/probe_roms_32.c
+++ b/arch/x86/kernel/probe_roms_32.c
@@ -18,7 +18,7 @@
18#include <asm/setup.h> 18#include <asm/setup.h>
19#include <asm/sections.h> 19#include <asm/sections.h>
20#include <asm/io.h> 20#include <asm/io.h>
21#include <setup_arch.h> 21#include <asm/setup_arch.h>
22 22
23static struct resource system_rom_resource = { 23static struct resource system_rom_resource = {
24 .name = "System ROM", 24 .name = "System ROM",
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6d12f7e37f8..6afa5232dbb 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -1,8 +1,8 @@
1#include <linux/errno.h> 1#include <linux/errno.h>
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include <linux/mm.h> 3#include <linux/mm.h>
4#include <asm/idle.h>
5#include <linux/smp.h> 4#include <linux/smp.h>
5#include <linux/prctl.h>
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/module.h> 8#include <linux/module.h>
@@ -11,6 +11,9 @@
11#include <linux/ftrace.h> 11#include <linux/ftrace.h>
12#include <asm/system.h> 12#include <asm/system.h>
13#include <asm/apic.h> 13#include <asm/apic.h>
14#include <asm/idle.h>
15#include <asm/uaccess.h>
16#include <asm/i387.h>
14 17
15unsigned long idle_halt; 18unsigned long idle_halt;
16EXPORT_SYMBOL(idle_halt); 19EXPORT_SYMBOL(idle_halt);
@@ -56,6 +59,192 @@ void arch_task_cache_init(void)
56} 59}
57 60
58/* 61/*
62 * Free current thread data structures etc..
63 */
64void exit_thread(void)
65{
66 struct task_struct *me = current;
67 struct thread_struct *t = &me->thread;
68
69 if (me->thread.io_bitmap_ptr) {
70 struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
71
72 kfree(t->io_bitmap_ptr);
73 t->io_bitmap_ptr = NULL;
74 clear_thread_flag(TIF_IO_BITMAP);
75 /*
76 * Careful, clear this in the TSS too:
77 */
78 memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
79 t->io_bitmap_max = 0;
80 put_cpu();
81 }
82
83 ds_exit_thread(current);
84}
85
86void flush_thread(void)
87{
88 struct task_struct *tsk = current;
89
90#ifdef CONFIG_X86_64
91 if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
92 clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
93 if (test_tsk_thread_flag(tsk, TIF_IA32)) {
94 clear_tsk_thread_flag(tsk, TIF_IA32);
95 } else {
96 set_tsk_thread_flag(tsk, TIF_IA32);
97 current_thread_info()->status |= TS_COMPAT;
98 }
99 }
100#endif
101
102 clear_tsk_thread_flag(tsk, TIF_DEBUG);
103
104 tsk->thread.debugreg0 = 0;
105 tsk->thread.debugreg1 = 0;
106 tsk->thread.debugreg2 = 0;
107 tsk->thread.debugreg3 = 0;
108 tsk->thread.debugreg6 = 0;
109 tsk->thread.debugreg7 = 0;
110 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
111 /*
112 * Forget coprocessor state..
113 */
114 tsk->fpu_counter = 0;
115 clear_fpu(tsk);
116 clear_used_math();
117}
118
119static void hard_disable_TSC(void)
120{
121 write_cr4(read_cr4() | X86_CR4_TSD);
122}
123
124void disable_TSC(void)
125{
126 preempt_disable();
127 if (!test_and_set_thread_flag(TIF_NOTSC))
128 /*
129 * Must flip the CPU state synchronously with
130 * TIF_NOTSC in the current running context.
131 */
132 hard_disable_TSC();
133 preempt_enable();
134}
135
136static void hard_enable_TSC(void)
137{
138 write_cr4(read_cr4() & ~X86_CR4_TSD);
139}
140
141static void enable_TSC(void)
142{
143 preempt_disable();
144 if (test_and_clear_thread_flag(TIF_NOTSC))
145 /*
146 * Must flip the CPU state synchronously with
147 * TIF_NOTSC in the current running context.
148 */
149 hard_enable_TSC();
150 preempt_enable();
151}
152
153int get_tsc_mode(unsigned long adr)
154{
155 unsigned int val;
156
157 if (test_thread_flag(TIF_NOTSC))
158 val = PR_TSC_SIGSEGV;
159 else
160 val = PR_TSC_ENABLE;
161
162 return put_user(val, (unsigned int __user *)adr);
163}
164
165int set_tsc_mode(unsigned int val)
166{
167 if (val == PR_TSC_SIGSEGV)
168 disable_TSC();
169 else if (val == PR_TSC_ENABLE)
170 enable_TSC();
171 else
172 return -EINVAL;
173
174 return 0;
175}
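The pair above backs the PR_GET_TSC/PR_SET_TSC prctl interface; a hedged userspace sketch of driving it (error handling omitted):

	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int mode;

	/* make RDTSC fault with SIGSEGV for this thread... */
	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
	prctl(PR_GET_TSC, &mode, 0, 0, 0);	/* mode == PR_TSC_SIGSEGV */
	/* ...and restore the default */
	prctl(PR_SET_TSC, PR_TSC_ENABLE, 0, 0, 0);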
176
177void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
178 struct tss_struct *tss)
179{
180 struct thread_struct *prev, *next;
181
182 prev = &prev_p->thread;
183 next = &next_p->thread;
184
185 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
186 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
187 ds_switch_to(prev_p, next_p);
188 else if (next->debugctlmsr != prev->debugctlmsr)
189 update_debugctlmsr(next->debugctlmsr);
190
191 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
192 set_debugreg(next->debugreg0, 0);
193 set_debugreg(next->debugreg1, 1);
194 set_debugreg(next->debugreg2, 2);
195 set_debugreg(next->debugreg3, 3);
196 /* no 4 and 5 */
197 set_debugreg(next->debugreg6, 6);
198 set_debugreg(next->debugreg7, 7);
199 }
200
201 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
202 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
203 /* prev and next are different */
204 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
205 hard_disable_TSC();
206 else
207 hard_enable_TSC();
208 }
209
210 if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
211 /*
212 * Copy the relevant range of the IO bitmap.
213 * Normally this is 128 bytes or less:
214 */
215 memcpy(tss->io_bitmap, next->io_bitmap_ptr,
216 max(prev->io_bitmap_max, next->io_bitmap_max));
217 } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
218 /*
219 * Clear any possible leftover bits:
220 */
221 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
222 }
223}
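The TIF_IO_BITMAP branches above service threads that obtained port access via ioperm(); only the used prefix of the bitmap (io_bitmap_max bytes) is copied into the TSS. A hedged userspace sketch of what creates that state (requires CAP_SYS_RAWIO):

	#include <sys/io.h>

	/* hedged sketch: grant ports 0x378-0x37a, poke one, revoke */
	if (ioperm(0x378, 3, 1) == 0) {		/* sets up the IO bitmap */
		outb(0x00, 0x378);
		ioperm(0x378, 3, 0);		/* clear the bits again */
	}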
224
225int sys_fork(struct pt_regs *regs)
226{
227 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
228}
229
230/*
231 * This is trivial, and on the face of it looks like it
232 * could equally well be done in user mode.
233 *
234 * Not so, for quite unobvious reasons - register pressure.
235 * In user mode vfork() cannot have a stack frame, and if
236 * done by calling the "clone()" system call directly, you
237 * do not have enough call-clobbered registers to hold all
238 * the information you need.
239 */
240int sys_vfork(struct pt_regs *regs)
241{
242 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
243 NULL, NULL);
244}
245
246
247/*
59 * Idle related variables and functions 248 * Idle related variables and functions
60 */ 249 */
61unsigned long boot_option_idle_override = 0; 250unsigned long boot_option_idle_override = 0;
@@ -350,7 +539,7 @@ static void c1e_idle(void)
350 539
351void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 540void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
352{ 541{
353#ifdef CONFIG_X86_SMP 542#ifdef CONFIG_SMP
354 if (pm_idle == poll_idle && smp_num_siblings > 1) { 543 if (pm_idle == poll_idle && smp_num_siblings > 1) {
355 printk(KERN_WARNING "WARNING: polling idle and HT enabled," 544 printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
356 " performance may degrade.\n"); 545 " performance may degrade.\n");
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd4da2af08a..14014d766ca 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -11,6 +11,7 @@
11 11
12#include <stdarg.h> 12#include <stdarg.h>
13 13
14#include <linux/stackprotector.h>
14#include <linux/cpu.h> 15#include <linux/cpu.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
16#include <linux/sched.h> 17#include <linux/sched.h>
@@ -66,9 +67,6 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
66DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; 67DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
67EXPORT_PER_CPU_SYMBOL(current_task); 68EXPORT_PER_CPU_SYMBOL(current_task);
68 69
69DEFINE_PER_CPU(int, cpu_number);
70EXPORT_PER_CPU_SYMBOL(cpu_number);
71
72/* 70/*
73 * Return saved PC of a blocked thread. 71 * Return saved PC of a blocked thread.
74 */ 72 */
@@ -94,6 +92,15 @@ void cpu_idle(void)
94{ 92{
95 int cpu = smp_processor_id(); 93 int cpu = smp_processor_id();
96 94
95 /*
96 * If we're the non-boot CPU, nothing set the stack canary up
97 * for us. CPU0 already has it initialized but no harm in
98 * doing it again. This is a good place for updating it, as
 99 * we won't ever return from this function (so the invalid
100 * canaries already on the stack won't ever trigger).
101 */
102 boot_init_stack_canary();
103
97 current_thread_info()->status |= TS_POLLING; 104 current_thread_info()->status |= TS_POLLING;
98 105
99 /* endless idle loop with no priority at all */ 106 /* endless idle loop with no priority at all */
@@ -108,7 +115,6 @@ void cpu_idle(void)
108 play_dead(); 115 play_dead();
109 116
110 local_irq_disable(); 117 local_irq_disable();
111 __get_cpu_var(irq_stat).idle_timestamp = jiffies;
112 /* Don't trace irqs off for idle */ 118 /* Don't trace irqs off for idle */
113 stop_critical_timings(); 119 stop_critical_timings();
114 pm_idle(); 120 pm_idle();
@@ -132,7 +138,7 @@ void __show_regs(struct pt_regs *regs, int all)
132 if (user_mode_vm(regs)) { 138 if (user_mode_vm(regs)) {
133 sp = regs->sp; 139 sp = regs->sp;
134 ss = regs->ss & 0xffff; 140 ss = regs->ss & 0xffff;
135 savesegment(gs, gs); 141 gs = get_user_gs(regs);
136 } else { 142 } else {
137 sp = (unsigned long) (&regs->sp); 143 sp = (unsigned long) (&regs->sp);
138 savesegment(ss, ss); 144 savesegment(ss, ss);
@@ -213,6 +219,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
213 regs.ds = __USER_DS; 219 regs.ds = __USER_DS;
214 regs.es = __USER_DS; 220 regs.es = __USER_DS;
215 regs.fs = __KERNEL_PERCPU; 221 regs.fs = __KERNEL_PERCPU;
222 regs.gs = __KERNEL_STACK_CANARY;
216 regs.orig_ax = -1; 223 regs.orig_ax = -1;
217 regs.ip = (unsigned long) kernel_thread_helper; 224 regs.ip = (unsigned long) kernel_thread_helper;
218 regs.cs = __KERNEL_CS | get_kernel_rpl(); 225 regs.cs = __KERNEL_CS | get_kernel_rpl();
@@ -223,55 +230,6 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
223} 230}
224EXPORT_SYMBOL(kernel_thread); 231EXPORT_SYMBOL(kernel_thread);
225 232
226/*
227 * Free current thread data structures etc..
228 */
229void exit_thread(void)
230{
231 /* The process may have allocated an io port bitmap... nuke it. */
232 if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
233 struct task_struct *tsk = current;
234 struct thread_struct *t = &tsk->thread;
235 int cpu = get_cpu();
236 struct tss_struct *tss = &per_cpu(init_tss, cpu);
237
238 kfree(t->io_bitmap_ptr);
239 t->io_bitmap_ptr = NULL;
240 clear_thread_flag(TIF_IO_BITMAP);
241 /*
242 * Careful, clear this in the TSS too:
243 */
244 memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
245 t->io_bitmap_max = 0;
246 tss->io_bitmap_owner = NULL;
247 tss->io_bitmap_max = 0;
248 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
249 put_cpu();
250 }
251
252 ds_exit_thread(current);
253}
254
255void flush_thread(void)
256{
257 struct task_struct *tsk = current;
258
259 tsk->thread.debugreg0 = 0;
260 tsk->thread.debugreg1 = 0;
261 tsk->thread.debugreg2 = 0;
262 tsk->thread.debugreg3 = 0;
263 tsk->thread.debugreg6 = 0;
264 tsk->thread.debugreg7 = 0;
265 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
266 clear_tsk_thread_flag(tsk, TIF_DEBUG);
267 /*
268 * Forget coprocessor state..
269 */
270 tsk->fpu_counter = 0;
271 clear_fpu(tsk);
272 clear_used_math();
273}
274
275void release_thread(struct task_struct *dead_task) 233void release_thread(struct task_struct *dead_task)
276{ 234{
277 BUG_ON(dead_task->mm); 235 BUG_ON(dead_task->mm);
@@ -305,7 +263,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
305 263
306 p->thread.ip = (unsigned long) ret_from_fork; 264 p->thread.ip = (unsigned long) ret_from_fork;
307 265
308 savesegment(gs, p->thread.gs); 266 task_user_gs(p) = get_user_gs(regs);
309 267
310 tsk = current; 268 tsk = current;
311 if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { 269 if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -343,7 +301,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
343void 301void
344start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) 302start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
345{ 303{
346 __asm__("movl %0, %%gs" : : "r"(0)); 304 set_user_gs(regs, 0);
347 regs->fs = 0; 305 regs->fs = 0;
348 set_fs(USER_DS); 306 set_fs(USER_DS);
349 regs->ds = __USER_DS; 307 regs->ds = __USER_DS;
@@ -359,127 +317,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
359} 317}
360EXPORT_SYMBOL_GPL(start_thread); 318EXPORT_SYMBOL_GPL(start_thread);
361 319
362static void hard_disable_TSC(void)
363{
364 write_cr4(read_cr4() | X86_CR4_TSD);
365}
366
367void disable_TSC(void)
368{
369 preempt_disable();
370 if (!test_and_set_thread_flag(TIF_NOTSC))
371 /*
372 * Must flip the CPU state synchronously with
373 * TIF_NOTSC in the current running context.
374 */
375 hard_disable_TSC();
376 preempt_enable();
377}
378
379static void hard_enable_TSC(void)
380{
381 write_cr4(read_cr4() & ~X86_CR4_TSD);
382}
383
384static void enable_TSC(void)
385{
386 preempt_disable();
387 if (test_and_clear_thread_flag(TIF_NOTSC))
388 /*
389 * Must flip the CPU state synchronously with
390 * TIF_NOTSC in the current running context.
391 */
392 hard_enable_TSC();
393 preempt_enable();
394}
395
396int get_tsc_mode(unsigned long adr)
397{
398 unsigned int val;
399
400 if (test_thread_flag(TIF_NOTSC))
401 val = PR_TSC_SIGSEGV;
402 else
403 val = PR_TSC_ENABLE;
404
405 return put_user(val, (unsigned int __user *)adr);
406}
407
408int set_tsc_mode(unsigned int val)
409{
410 if (val == PR_TSC_SIGSEGV)
411 disable_TSC();
412 else if (val == PR_TSC_ENABLE)
413 enable_TSC();
414 else
415 return -EINVAL;
416
417 return 0;
418}
419
420static noinline void
421__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
422 struct tss_struct *tss)
423{
424 struct thread_struct *prev, *next;
425
426 prev = &prev_p->thread;
427 next = &next_p->thread;
428
429 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
430 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
431 ds_switch_to(prev_p, next_p);
432 else if (next->debugctlmsr != prev->debugctlmsr)
433 update_debugctlmsr(next->debugctlmsr);
434
435 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
436 set_debugreg(next->debugreg0, 0);
437 set_debugreg(next->debugreg1, 1);
438 set_debugreg(next->debugreg2, 2);
439 set_debugreg(next->debugreg3, 3);
440 /* no 4 and 5 */
441 set_debugreg(next->debugreg6, 6);
442 set_debugreg(next->debugreg7, 7);
443 }
444
445 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
446 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
447 /* prev and next are different */
448 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
449 hard_disable_TSC();
450 else
451 hard_enable_TSC();
452 }
453
454 if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
455 /*
456 * Disable the bitmap via an invalid offset. We still cache
457 * the previous bitmap owner and the IO bitmap contents:
458 */
459 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
460 return;
461 }
462
463 if (likely(next == tss->io_bitmap_owner)) {
464 /*
465 * Previous owner of the bitmap (hence the bitmap content)
466 * matches the next task, we don't have to do anything but
467 * to set a valid offset in the TSS:
468 */
469 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
470 return;
471 }
472 /*
473 * Lazy TSS's I/O bitmap copy. We set an invalid offset here
474 * and we let the task to get a GPF in case an I/O instruction
475 * is performed. The handler of the GPF will verify that the
476 * faulting task has a valid I/O bitmap and, it true, does the
477 * real copy and restart the instruction. This will save us
478 * redundant copies when the currently switched task does not
479 * perform any I/O during its timeslice.
480 */
481 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
482}
483 320
484/* 321/*
485 * switch_to(x,y) should switch tasks from x to y. 322
@@ -540,7 +377,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
540 * used %fs or %gs (it does not today), or if the kernel is 377 * used %fs or %gs (it does not today), or if the kernel is
541 * running inside of a hypervisor layer. 378 * running inside of a hypervisor layer.
542 */ 379 */
543 savesegment(gs, prev->gs); 380 lazy_save_gs(prev->gs);
544 381
545 /* 382 /*
546 * Load the per-thread Thread-Local Storage descriptor. 383 * Load the per-thread Thread-Local Storage descriptor.
@@ -586,64 +423,44 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
586 * Restore %gs if needed (which is common) 423 * Restore %gs if needed (which is common)
587 */ 424 */
588 if (prev->gs | next->gs) 425 if (prev->gs | next->gs)
589 loadsegment(gs, next->gs); 426 lazy_load_gs(next->gs);
590 427
591 x86_write_percpu(current_task, next_p); 428 percpu_write(current_task, next_p);
592 429
593 return prev_p; 430 return prev_p;
594} 431}
595 432
596asmlinkage int sys_fork(struct pt_regs regs) 433int sys_clone(struct pt_regs *regs)
597{
598 return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
599}
600
601asmlinkage int sys_clone(struct pt_regs regs)
602{ 434{
603 unsigned long clone_flags; 435 unsigned long clone_flags;
604 unsigned long newsp; 436 unsigned long newsp;
605 int __user *parent_tidptr, *child_tidptr; 437 int __user *parent_tidptr, *child_tidptr;
606 438
607 clone_flags = regs.bx; 439 clone_flags = regs->bx;
608 newsp = regs.cx; 440 newsp = regs->cx;
609 parent_tidptr = (int __user *)regs.dx; 441 parent_tidptr = (int __user *)regs->dx;
610 child_tidptr = (int __user *)regs.di; 442 child_tidptr = (int __user *)regs->di;
611 if (!newsp) 443 if (!newsp)
612 newsp = regs.sp; 444 newsp = regs->sp;
613 return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr); 445 return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
614}
615
616/*
617 * This is trivial, and on the face of it looks like it
618 * could equally well be done in user mode.
619 *
620 * Not so, for quite unobvious reasons - register pressure.
621 * In user mode vfork() cannot have a stack frame, and if
622 * done by calling the "clone()" system call directly, you
623 * do not have enough call-clobbered registers to hold all
624 * the information you need.
625 */
626asmlinkage int sys_vfork(struct pt_regs regs)
627{
628 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
629} 446}
630 447
631/* 448/*
632 * sys_execve() executes a new program. 449 * sys_execve() executes a new program.
633 */ 450 */
634asmlinkage int sys_execve(struct pt_regs regs) 451int sys_execve(struct pt_regs *regs)
635{ 452{
636 int error; 453 int error;
637 char *filename; 454 char *filename;
638 455
639 filename = getname((char __user *) regs.bx); 456 filename = getname((char __user *) regs->bx);
640 error = PTR_ERR(filename); 457 error = PTR_ERR(filename);
641 if (IS_ERR(filename)) 458 if (IS_ERR(filename))
642 goto out; 459 goto out;
643 error = do_execve(filename, 460 error = do_execve(filename,
644 (char __user * __user *) regs.cx, 461 (char __user * __user *) regs->cx,
645 (char __user * __user *) regs.dx, 462 (char __user * __user *) regs->dx,
646 &regs); 463 regs);
647 if (error == 0) { 464 if (error == 0) {
648 /* Make sure we don't return using sysenter.. */ 465 /* Make sure we don't return using sysenter.. */
649 set_thread_flag(TIF_IRET); 466 set_thread_flag(TIF_IRET);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 85b4cb5c198..abb7e6a7f0c 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -16,6 +16,7 @@
16 16
17#include <stdarg.h> 17#include <stdarg.h>
18 18
19#include <linux/stackprotector.h>
19#include <linux/cpu.h> 20#include <linux/cpu.h>
20#include <linux/errno.h> 21#include <linux/errno.h>
21#include <linux/sched.h> 22#include <linux/sched.h>
@@ -47,7 +48,6 @@
47#include <asm/processor.h> 48#include <asm/processor.h>
48#include <asm/i387.h> 49#include <asm/i387.h>
49#include <asm/mmu_context.h> 50#include <asm/mmu_context.h>
50#include <asm/pda.h>
51#include <asm/prctl.h> 51#include <asm/prctl.h>
52#include <asm/desc.h> 52#include <asm/desc.h>
53#include <asm/proto.h> 53#include <asm/proto.h>
@@ -58,6 +58,12 @@
58 58
59asmlinkage extern void ret_from_fork(void); 59asmlinkage extern void ret_from_fork(void);
60 60
61DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
62EXPORT_PER_CPU_SYMBOL(current_task);
63
64DEFINE_PER_CPU(unsigned long, old_rsp);
65static DEFINE_PER_CPU(unsigned char, is_idle);
66
61unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED; 67unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
62 68
63static ATOMIC_NOTIFIER_HEAD(idle_notifier); 69static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -76,13 +82,13 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
76 82
77void enter_idle(void) 83void enter_idle(void)
78{ 84{
79 write_pda(isidle, 1); 85 percpu_write(is_idle, 1);
80 atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL); 86 atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
81} 87}
82 88
83static void __exit_idle(void) 89static void __exit_idle(void)
84{ 90{
85 if (test_and_clear_bit_pda(0, isidle) == 0) 91 if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
86 return; 92 return;
87 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); 93 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
88} 94}
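enter_idle()/__exit_idle() feed an atomic notifier chain; a hedged sketch of a consumer, using the idle_notifier_register() API visible in the hunk header above:

	/* hedged sketch of an idle-notifier consumer */
	static int my_idle_notify(struct notifier_block *nb,
				  unsigned long val, void *data)
	{
		if (val == IDLE_START)
			;	/* CPU is entering idle */
		else if (val == IDLE_END)
			;	/* CPU is leaving idle */
		return NOTIFY_OK;
	}

	static struct notifier_block my_idle_nb = {
		.notifier_call = my_idle_notify,
	};

	/* somewhere in init code: idle_notifier_register(&my_idle_nb); */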
@@ -112,6 +118,16 @@ static inline void play_dead(void)
112void cpu_idle(void) 118void cpu_idle(void)
113{ 119{
114 current_thread_info()->status |= TS_POLLING; 120 current_thread_info()->status |= TS_POLLING;
121
122 /*
123 * If we're the non-boot CPU, nothing set the stack canary up
124 * for us. CPU0 already has it initialized but no harm in
125 * doing it again. This is a good place for updating it, as
127 * we won't ever return from this function (so the invalid
128 * canaries already on the stack won't ever trigger).
128 */
129 boot_init_stack_canary();
130
115 /* endless idle loop with no priority at all */ 131 /* endless idle loop with no priority at all */
116 while (1) { 132 while (1) {
117 tick_nohz_stop_sched_tick(1); 133 tick_nohz_stop_sched_tick(1);
@@ -221,61 +237,6 @@ void show_regs(struct pt_regs *regs)
221 show_trace(NULL, regs, (void *)(regs + 1), regs->bp); 237 show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
222} 238}
223 239
224/*
225 * Free current thread data structures etc..
226 */
227void exit_thread(void)
228{
229 struct task_struct *me = current;
230 struct thread_struct *t = &me->thread;
231
232 if (me->thread.io_bitmap_ptr) {
233 struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
234
235 kfree(t->io_bitmap_ptr);
236 t->io_bitmap_ptr = NULL;
237 clear_thread_flag(TIF_IO_BITMAP);
238 /*
239 * Careful, clear this in the TSS too:
240 */
241 memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
242 t->io_bitmap_max = 0;
243 put_cpu();
244 }
245
246 ds_exit_thread(current);
247}
248
249void flush_thread(void)
250{
251 struct task_struct *tsk = current;
252
253 if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
254 clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
255 if (test_tsk_thread_flag(tsk, TIF_IA32)) {
256 clear_tsk_thread_flag(tsk, TIF_IA32);
257 } else {
258 set_tsk_thread_flag(tsk, TIF_IA32);
259 current_thread_info()->status |= TS_COMPAT;
260 }
261 }
262 clear_tsk_thread_flag(tsk, TIF_DEBUG);
263
264 tsk->thread.debugreg0 = 0;
265 tsk->thread.debugreg1 = 0;
266 tsk->thread.debugreg2 = 0;
267 tsk->thread.debugreg3 = 0;
268 tsk->thread.debugreg6 = 0;
269 tsk->thread.debugreg7 = 0;
270 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
271 /*
272 * Forget coprocessor state..
273 */
274 tsk->fpu_counter = 0;
275 clear_fpu(tsk);
276 clear_used_math();
277}
278
279void release_thread(struct task_struct *dead_task) 240void release_thread(struct task_struct *dead_task)
280{ 241{
281 if (dead_task->mm) { 242 if (dead_task->mm) {
@@ -397,7 +358,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
397 load_gs_index(0); 358 load_gs_index(0);
398 regs->ip = new_ip; 359 regs->ip = new_ip;
399 regs->sp = new_sp; 360 regs->sp = new_sp;
400 write_pda(oldrsp, new_sp); 361 percpu_write(old_rsp, new_sp);
401 regs->cs = __USER_CS; 362 regs->cs = __USER_CS;
402 regs->ss = __USER_DS; 363 regs->ss = __USER_DS;
403 regs->flags = 0x200; 364 regs->flags = 0x200;
@@ -409,118 +370,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
409} 370}
410EXPORT_SYMBOL_GPL(start_thread); 371EXPORT_SYMBOL_GPL(start_thread);
411 372
412static void hard_disable_TSC(void)
413{
414 write_cr4(read_cr4() | X86_CR4_TSD);
415}
416
417void disable_TSC(void)
418{
419 preempt_disable();
420 if (!test_and_set_thread_flag(TIF_NOTSC))
421 /*
422 * Must flip the CPU state synchronously with
423 * TIF_NOTSC in the current running context.
424 */
425 hard_disable_TSC();
426 preempt_enable();
427}
428
429static void hard_enable_TSC(void)
430{
431 write_cr4(read_cr4() & ~X86_CR4_TSD);
432}
433
434static void enable_TSC(void)
435{
436 preempt_disable();
437 if (test_and_clear_thread_flag(TIF_NOTSC))
438 /*
439 * Must flip the CPU state synchronously with
440 * TIF_NOTSC in the current running context.
441 */
442 hard_enable_TSC();
443 preempt_enable();
444}
445
446int get_tsc_mode(unsigned long adr)
447{
448 unsigned int val;
449
450 if (test_thread_flag(TIF_NOTSC))
451 val = PR_TSC_SIGSEGV;
452 else
453 val = PR_TSC_ENABLE;
454
455 return put_user(val, (unsigned int __user *)adr);
456}
457
458int set_tsc_mode(unsigned int val)
459{
460 if (val == PR_TSC_SIGSEGV)
461 disable_TSC();
462 else if (val == PR_TSC_ENABLE)
463 enable_TSC();
464 else
465 return -EINVAL;
466
467 return 0;
468}
469
470/*
471 * This special macro can be used to load a debugging register
472 */
473#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
474
475static inline void __switch_to_xtra(struct task_struct *prev_p,
476 struct task_struct *next_p,
477 struct tss_struct *tss)
478{
479 struct thread_struct *prev, *next;
480
481 prev = &prev_p->thread,
482 next = &next_p->thread;
483
484 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
485 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
486 ds_switch_to(prev_p, next_p);
487 else if (next->debugctlmsr != prev->debugctlmsr)
488 update_debugctlmsr(next->debugctlmsr);
489
490 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
491 loaddebug(next, 0);
492 loaddebug(next, 1);
493 loaddebug(next, 2);
494 loaddebug(next, 3);
495 /* no 4 and 5 */
496 loaddebug(next, 6);
497 loaddebug(next, 7);
498 }
499
500 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
501 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
502 /* prev and next are different */
503 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
504 hard_disable_TSC();
505 else
506 hard_enable_TSC();
507 }
508
509 if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
510 /*
511 * Copy the relevant range of the IO bitmap.
512 * Normally this is 128 bytes or less:
513 */
514 memcpy(tss->io_bitmap, next->io_bitmap_ptr,
515 max(prev->io_bitmap_max, next->io_bitmap_max));
516 } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
517 /*
518 * Clear any possible leftover bits:
519 */
520 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
521 }
522}
523
524/* 373/*
525 * switch_to(x,y) should switch tasks from x to y. 374 * switch_to(x,y) should switch tasks from x to y.
526 * 375 *
@@ -618,21 +467,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
618 /* 467 /*
619 * Switch the PDA and FPU contexts. 468 * Switch the PDA and FPU contexts.
620 */ 469 */
621 prev->usersp = read_pda(oldrsp); 470 prev->usersp = percpu_read(old_rsp);
622 write_pda(oldrsp, next->usersp); 471 percpu_write(old_rsp, next->usersp);
623 write_pda(pcurrent, next_p); 472 percpu_write(current_task, next_p);
624 473
625 write_pda(kernelstack, 474 percpu_write(kernel_stack,
626 (unsigned long)task_stack_page(next_p) + 475 (unsigned long)task_stack_page(next_p) +
627 THREAD_SIZE - PDA_STACKOFFSET); 476 THREAD_SIZE - KERNEL_STACK_OFFSET);
628#ifdef CONFIG_CC_STACKPROTECTOR
629 write_pda(stack_canary, next_p->stack_canary);
630 /*
631 * Build time only check to make sure the stack_canary is at
632 * offset 40 in the pda; this is a gcc ABI requirement
633 */
634 BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
635#endif
636 477
637 /* 478 /*
638 * Now maybe reload the debug registers and handle I/O bitmaps 479 * Now maybe reload the debug registers and handle I/O bitmaps
@@ -686,11 +527,6 @@ void set_personality_64bit(void)
686 current->personality &= ~READ_IMPLIES_EXEC; 527 current->personality &= ~READ_IMPLIES_EXEC;
687} 528}
688 529
689asmlinkage long sys_fork(struct pt_regs *regs)
690{
691 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
692}
693
694asmlinkage long 530asmlinkage long
695sys_clone(unsigned long clone_flags, unsigned long newsp, 531sys_clone(unsigned long clone_flags, unsigned long newsp,
696 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) 532 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
@@ -700,22 +536,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
700 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); 536 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
701} 537}
702 538
703/*
704 * This is trivial, and on the face of it looks like it
705 * could equally well be done in user mode.
706 *
707 * Not so, for quite unobvious reasons - register pressure.
708 * In user mode vfork() cannot have a stack frame, and if
709 * done by calling the "clone()" system call directly, you
710 * do not have enough call-clobbered registers to hold all
711 * the information you need.
712 */
713asmlinkage long sys_vfork(struct pt_regs *regs)
714{
715 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
716 NULL, NULL);
717}
718
719unsigned long get_wchan(struct task_struct *p) 539unsigned long get_wchan(struct task_struct *p)
720{ 540{
721 unsigned long stack; 541 unsigned long stack;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 06ca07f6ad8..19378715f41 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -75,10 +75,7 @@ static inline bool invalid_selector(u16 value)
75static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) 75static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
76{ 76{
77 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); 77 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
78 regno >>= 2; 78 return &regs->bx + (regno >> 2);
79 if (regno > FS)
80 --regno;
81 return &regs->bx + regno;
82} 79}
83 80
84static u16 get_segment_reg(struct task_struct *task, unsigned long offset) 81static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
@@ -90,9 +87,10 @@ static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
90 if (offset != offsetof(struct user_regs_struct, gs)) 87 if (offset != offsetof(struct user_regs_struct, gs))
91 retval = *pt_regs_access(task_pt_regs(task), offset); 88 retval = *pt_regs_access(task_pt_regs(task), offset);
92 else { 89 else {
93 retval = task->thread.gs;
94 if (task == current) 90 if (task == current)
95 savesegment(gs, retval); 91 retval = get_user_gs(task_pt_regs(task));
92 else
93 retval = task_user_gs(task);
96 } 94 }
97 return retval; 95 return retval;
98} 96}
@@ -126,13 +124,10 @@ static int set_segment_reg(struct task_struct *task,
126 break; 124 break;
127 125
128 case offsetof(struct user_regs_struct, gs): 126 case offsetof(struct user_regs_struct, gs):
129 task->thread.gs = value;
130 if (task == current) 127 if (task == current)
131 /* 128 set_user_gs(task_pt_regs(task), value);
132 * The user-mode %gs is not affected by 129 else
133 * kernel entry, so we must update the CPU. 130 task_user_gs(task) = value;
134 */
135 loadsegment(gs, value);
136 } 131 }
137 132
138 return 0; 133 return 0;
@@ -273,7 +268,7 @@ static unsigned long debugreg_addr_limit(struct task_struct *task)
273 if (test_tsk_thread_flag(task, TIF_IA32)) 268 if (test_tsk_thread_flag(task, TIF_IA32))
274 return IA32_PAGE_OFFSET - 3; 269 return IA32_PAGE_OFFSET - 3;
275#endif 270#endif
276 return TASK_SIZE64 - 7; 271 return TASK_SIZE_MAX - 7;
277} 272}
278 273
279#endif /* CONFIG_X86_32 */ 274#endif /* CONFIG_X86_32 */
@@ -690,9 +685,8 @@ static int ptrace_bts_config(struct task_struct *child,
690 if (!cfg.signal) 685 if (!cfg.signal)
691 return -EINVAL; 686 return -EINVAL;
692 687
693 return -EOPNOTSUPP;
694
695 child->thread.bts_ovfl_signal = cfg.signal; 688 child->thread.bts_ovfl_signal = cfg.signal;
689 return -EOPNOTSUPP;
696 } 690 }
697 691
698 if ((cfg.flags & PTRACE_BTS_O_ALLOC) && 692 if ((cfg.flags & PTRACE_BTS_O_ALLOC) &&
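
Two things happen in the ptrace.c hunks above. First, pt_regs_access() collapses to a plain index: with the lazy-gs rework, user_regs_struct offsets map 1:1 onto pt_regs slots, so the old skip over a missing gs entry is gone. Second, %gs reads and writes go through get_user_gs()/set_user_gs()/task_user_gs(), hiding where the saved value actually lives. A runnable user-space analogue of the offset-to-slot indexing (struct and names hypothetical):

    #include <assert.h>
    #include <stddef.h>

    struct regs { long bx, cx, dx, si, di; };       /* same-typed slots */

    /* byte offset -> slot pointer, as pt_regs_access() does */
    static long *reg_at(struct regs *r, size_t byte_off)
    {
            assert(offsetof(struct regs, bx) == 0); /* bx must be first */
            return &r->bx + byte_off / sizeof(long);
    }

    int main(void)
    {
            struct regs r = { .dx = 42 };
            assert(*reg_at(&r, offsetof(struct regs, dx)) == 42);
            return 0;
    }

The kernel spells the division as regno >> 2 because user_regs_struct on 32-bit x86 is an array of 4-byte slots.
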
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 309949e9e1c..6a5a2970f4c 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -74,8 +74,7 @@ static void ich_force_hpet_resume(void)
74 if (!force_hpet_address) 74 if (!force_hpet_address)
75 return; 75 return;
76 76
77 if (rcba_base == NULL) 77 BUG_ON(rcba_base == NULL);
78 BUG();
79 78
80 /* read the Function Disable register, dword mode only */ 79 /* read the Function Disable register, dword mode only */
81 val = readl(rcba_base + 0x3404); 80 val = readl(rcba_base + 0x3404);
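
The quirks.c hunk folds an open-coded "if (rcba_base == NULL) BUG();" into the equivalent BUG_ON(). For reference, the generic definition is essentially:

    #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
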
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 4526b3a75ed..2aef36d8aca 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -14,6 +14,7 @@
14#include <asm/reboot.h> 14#include <asm/reboot.h>
15#include <asm/pci_x86.h> 15#include <asm/pci_x86.h>
16#include <asm/virtext.h> 16#include <asm/virtext.h>
17#include <asm/cpu.h>
17 18
18#ifdef CONFIG_X86_32 19#ifdef CONFIG_X86_32
19# include <linux/dmi.h> 20# include <linux/dmi.h>
@@ -23,8 +24,6 @@
23# include <asm/iommu.h> 24# include <asm/iommu.h>
24#endif 25#endif
25 26
26#include <mach_ipi.h>
27
28/* 27/*
29 * Power off function, if any 28 * Power off function, if any
30 */ 29 */
@@ -658,7 +657,7 @@ static int crash_nmi_callback(struct notifier_block *self,
658 657
659static void smp_send_nmi_allbutself(void) 658static void smp_send_nmi_allbutself(void)
660{ 659{
661 send_IPI_allbutself(NMI_VECTOR); 660 apic->send_IPI_allbutself(NMI_VECTOR);
662} 661}
663 662
664static struct notifier_block crash_nmi_nb = { 663static struct notifier_block crash_nmi_nb = {
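
The reboot.c change follows from the removal of mach_ipi.h: rather than a send_IPI_allbutself() selected at compile time per subarchitecture, callers now indirect through the runtime-probed apic driver. A sketch of the shape involved (field list heavily abbreviated; the real struct apic in this series carries dozens of ops):

    struct apic {
            const char *name;
            void (*send_IPI_allbutself)(int vector);
            void (*send_IPI_all)(int vector);
            /* ... */
    };

    extern struct apic *apic;       /* chosen once, at APIC probe time */

    static void smp_send_nmi_allbutself(void)
    {
            apic->send_IPI_allbutself(NMI_VECTOR);
    }

The indirection lets one kernel image carry several APIC drivers and pick the right one at boot instead of at build time.
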
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index a160f311972..41235531b11 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/page.h> 10#include <asm/page_types.h>
11#include <asm/kexec.h> 11#include <asm/kexec.h>
12#include <asm/processor-flags.h> 12#include <asm/processor-flags.h>
13 13
@@ -17,7 +17,8 @@
17 17
18#define PTR(x) (x << 2) 18#define PTR(x) (x << 2)
19 19
20/* control_page + KEXEC_CONTROL_CODE_MAX_SIZE 20/*
21 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
21 * ~ control_page + PAGE_SIZE are used as data storage and stack for 22 * ~ control_page + PAGE_SIZE are used as data storage and stack for
22 * jumping back 23 * jumping back
23 */ 24 */
@@ -76,8 +77,10 @@ relocate_kernel:
76 movl %eax, CP_PA_SWAP_PAGE(%edi) 77 movl %eax, CP_PA_SWAP_PAGE(%edi)
77 movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi) 78 movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi)
78 79
79 /* get physical address of control page now */ 80 /*
80 /* this is impossible after page table switch */ 81 * get physical address of control page now
82 * this is impossible after page table switch
83 */
81 movl PTR(PA_CONTROL_PAGE)(%ebp), %edi 84 movl PTR(PA_CONTROL_PAGE)(%ebp), %edi
82 85
83 /* switch to new set of page tables */ 86 /* switch to new set of page tables */
@@ -97,7 +100,8 @@ identity_mapped:
97 /* store the start address on the stack */ 100 /* store the start address on the stack */
98 pushl %edx 101 pushl %edx
99 102
100 /* Set cr0 to a known state: 103 /*
104 * Set cr0 to a known state:
101 * - Paging disabled 105 * - Paging disabled
102 * - Alignment check disabled 106 * - Alignment check disabled
103 * - Write protect disabled 107 * - Write protect disabled
@@ -113,7 +117,8 @@ identity_mapped:
113 /* clear cr4 if applicable */ 117 /* clear cr4 if applicable */
114 testl %ecx, %ecx 118 testl %ecx, %ecx
115 jz 1f 119 jz 1f
116 /* Set cr4 to a known state: 120 /*
121 * Set cr4 to a known state:
117 * Setting everything to zero seems safe. 122 * Setting everything to zero seems safe.
118 */ 123 */
119 xorl %eax, %eax 124 xorl %eax, %eax
@@ -132,15 +137,18 @@ identity_mapped:
132 call swap_pages 137 call swap_pages
133 addl $8, %esp 138 addl $8, %esp
134 139
135 /* To be certain of avoiding problems with self-modifying code 140 /*
141 * To be certain of avoiding problems with self-modifying code
136 * I need to execute a serializing instruction here. 142 * I need to execute a serializing instruction here.
137 * So I flush the TLB, it's handy, and not processor dependent. 143 * So I flush the TLB, it's handy, and not processor dependent.
138 */ 144 */
139 xorl %eax, %eax 145 xorl %eax, %eax
140 movl %eax, %cr3 146 movl %eax, %cr3
141 147
142 /* set all of the registers to known values */ 148 /*
143 /* leave %esp alone */ 149 * set all of the registers to known values
150 * leave %esp alone
151 */
144 152
145 testl %esi, %esi 153 testl %esi, %esi
146 jnz 1f 154 jnz 1f
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index f5afe665a82..4de8f5b3d47 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -7,10 +7,10 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/page.h> 10#include <asm/page_types.h>
11#include <asm/kexec.h> 11#include <asm/kexec.h>
12#include <asm/processor-flags.h> 12#include <asm/processor-flags.h>
13#include <asm/pgtable.h> 13#include <asm/pgtable_types.h>
14 14
15/* 15/*
16 * Must be relocatable PIC code callable as a C function 16 * Must be relocatable PIC code callable as a C function
@@ -19,145 +19,76 @@
19#define PTR(x) (x << 3) 19#define PTR(x) (x << 3)
20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
21 21
22/*
23 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
24 * ~ control_page + PAGE_SIZE are used as data storage and stack for
25 * jumping back
26 */
27#define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))
28
29/* Minimal CPU state */
30#define RSP DATA(0x0)
31#define CR0 DATA(0x8)
32#define CR3 DATA(0x10)
33#define CR4 DATA(0x18)
34
35/* other data */
36#define CP_PA_TABLE_PAGE DATA(0x20)
37#define CP_PA_SWAP_PAGE DATA(0x28)
38#define CP_PA_BACKUP_PAGES_MAP DATA(0x30)
39
22 .text 40 .text
23 .align PAGE_SIZE 41 .align PAGE_SIZE
24 .code64 42 .code64
25 .globl relocate_kernel 43 .globl relocate_kernel
26relocate_kernel: 44relocate_kernel:
27 /* %rdi indirection_page 45 /*
46 * %rdi indirection_page
28 * %rsi page_list 47 * %rsi page_list
29 * %rdx start address 48 * %rdx start address
49 * %rcx preserve_context
30 */ 50 */
31 51
32 /* map the control page at its virtual address */ 52 /* Save the CPU context, used for jumping back */
33 53 pushq %rbx
34 movq $0x0000ff8000000000, %r10 /* mask */ 54 pushq %rbp
35 mov $(39 - 3), %cl /* bits to shift */ 55 pushq %r12
36 movq PTR(VA_CONTROL_PAGE)(%rsi), %r11 /* address to map */ 56 pushq %r13
37 57 pushq %r14
38 movq %r11, %r9 58 pushq %r15
39 andq %r10, %r9 59 pushf
40 shrq %cl, %r9 60
41 61 movq PTR(VA_CONTROL_PAGE)(%rsi), %r11
42 movq PTR(VA_PGD)(%rsi), %r8 62 movq %rsp, RSP(%r11)
43 addq %r8, %r9 63 movq %cr0, %rax
44 movq PTR(PA_PUD_0)(%rsi), %r8 64 movq %rax, CR0(%r11)
45 orq $PAGE_ATTR, %r8 65 movq %cr3, %rax
46 movq %r8, (%r9) 66 movq %rax, CR3(%r11)
47 67 movq %cr4, %rax
48 shrq $9, %r10 68 movq %rax, CR4(%r11)
49 sub $9, %cl
50
51 movq %r11, %r9
52 andq %r10, %r9
53 shrq %cl, %r9
54
55 movq PTR(VA_PUD_0)(%rsi), %r8
56 addq %r8, %r9
57 movq PTR(PA_PMD_0)(%rsi), %r8
58 orq $PAGE_ATTR, %r8
59 movq %r8, (%r9)
60
61 shrq $9, %r10
62 sub $9, %cl
63
64 movq %r11, %r9
65 andq %r10, %r9
66 shrq %cl, %r9
67
68 movq PTR(VA_PMD_0)(%rsi), %r8
69 addq %r8, %r9
70 movq PTR(PA_PTE_0)(%rsi), %r8
71 orq $PAGE_ATTR, %r8
72 movq %r8, (%r9)
73
74 shrq $9, %r10
75 sub $9, %cl
76
77 movq %r11, %r9
78 andq %r10, %r9
79 shrq %cl, %r9
80
81 movq PTR(VA_PTE_0)(%rsi), %r8
82 addq %r8, %r9
83 movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
84 orq $PAGE_ATTR, %r8
85 movq %r8, (%r9)
86
87 /* identity map the control page at its physical address */
88
89 movq $0x0000ff8000000000, %r10 /* mask */
90 mov $(39 - 3), %cl /* bits to shift */
91 movq PTR(PA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
92
93 movq %r11, %r9
94 andq %r10, %r9
95 shrq %cl, %r9
96
97 movq PTR(VA_PGD)(%rsi), %r8
98 addq %r8, %r9
99 movq PTR(PA_PUD_1)(%rsi), %r8
100 orq $PAGE_ATTR, %r8
101 movq %r8, (%r9)
102
103 shrq $9, %r10
104 sub $9, %cl
105
106 movq %r11, %r9
107 andq %r10, %r9
108 shrq %cl, %r9
109
110 movq PTR(VA_PUD_1)(%rsi), %r8
111 addq %r8, %r9
112 movq PTR(PA_PMD_1)(%rsi), %r8
113 orq $PAGE_ATTR, %r8
114 movq %r8, (%r9)
115
116 shrq $9, %r10
117 sub $9, %cl
118
119 movq %r11, %r9
120 andq %r10, %r9
121 shrq %cl, %r9
122
123 movq PTR(VA_PMD_1)(%rsi), %r8
124 addq %r8, %r9
125 movq PTR(PA_PTE_1)(%rsi), %r8
126 orq $PAGE_ATTR, %r8
127 movq %r8, (%r9)
128
129 shrq $9, %r10
130 sub $9, %cl
131
132 movq %r11, %r9
133 andq %r10, %r9
134 shrq %cl, %r9
135
136 movq PTR(VA_PTE_1)(%rsi), %r8
137 addq %r8, %r9
138 movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
139 orq $PAGE_ATTR, %r8
140 movq %r8, (%r9)
141
142relocate_new_kernel:
143 /* %rdi indirection_page
144 * %rsi page_list
145 * %rdx start address
146 */
147 69
148 /* zero out flags, and disable interrupts */ 70 /* zero out flags, and disable interrupts */
149 pushq $0 71 pushq $0
150 popfq 72 popfq
151 73
152 /* get physical address of control page now */ 74 /*
153 /* this is impossible after page table switch */ 75 * get physical address of control page now
76 * this is impossible after page table switch
77 */
154 movq PTR(PA_CONTROL_PAGE)(%rsi), %r8 78 movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
155 79
156 /* get physical address of page table now too */ 80 /* get physical address of page table now too */
157 movq PTR(PA_TABLE_PAGE)(%rsi), %rcx 81 movq PTR(PA_TABLE_PAGE)(%rsi), %r9
82
83 /* get physical address of swap page now */
84 movq PTR(PA_SWAP_PAGE)(%rsi), %r10
158 85
159 /* switch to new set of page tables */ 86 /* save some information for jumping back */
160 movq PTR(PA_PGD)(%rsi), %r9 87 movq %r9, CP_PA_TABLE_PAGE(%r11)
88 movq %r10, CP_PA_SWAP_PAGE(%r11)
89 movq %rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
90
91 /* Switch to the identity mapped page tables */
161 movq %r9, %cr3 92 movq %r9, %cr3
162 93
163 /* setup a new stack at the end of the physical control page */ 94 /* setup a new stack at the end of the physical control page */
@@ -172,7 +103,8 @@ identity_mapped:
172 /* store the start address on the stack */ 103 /* store the start address on the stack */
173 pushq %rdx 104 pushq %rdx
174 105
175 /* Set cr0 to a known state: 106 /*
107 * Set cr0 to a known state:
176 * - Paging enabled 108 * - Paging enabled
177 * - Alignment check disabled 109 * - Alignment check disabled
178 * - Write protect disabled 110 * - Write protect disabled
@@ -185,7 +117,8 @@ identity_mapped:
185 orl $(X86_CR0_PG | X86_CR0_PE), %eax 117 orl $(X86_CR0_PG | X86_CR0_PE), %eax
186 movq %rax, %cr0 118 movq %rax, %cr0
187 119
188 /* Set cr4 to a known state: 120 /*
121 * Set cr4 to a known state:
189 * - physical address extension enabled 122 * - physical address extension enabled
190 */ 123 */
191 movq $X86_CR4_PAE, %rax 124 movq $X86_CR4_PAE, %rax
@@ -194,12 +127,88 @@ identity_mapped:
194 jmp 1f 127 jmp 1f
1951: 1281:
196 129
197 /* Switch to the identity mapped page tables, 130 /* Flush the TLB (needed?) */
198 * and flush the TLB. 131 movq %r9, %cr3
199 */ 132
200 movq %rcx, %cr3 133 movq %rcx, %r11
134 call swap_pages
135
136 /*
137 * To be certain of avoiding problems with self-modifying code
138 * I need to execute a serializing instruction here.
139 * So I flush the TLB by reloading %cr3 here, it's handy,
140 * and not processor dependent.
141 */
142 movq %cr3, %rax
143 movq %rax, %cr3
144
145 /*
146 * set all of the registers to known values
147 * leave %rsp alone
148 */
149
150 testq %r11, %r11
151 jnz 1f
152 xorq %rax, %rax
153 xorq %rbx, %rbx
154 xorq %rcx, %rcx
155 xorq %rdx, %rdx
156 xorq %rsi, %rsi
157 xorq %rdi, %rdi
158 xorq %rbp, %rbp
159 xorq %r8, %r8
160 xorq %r9, %r9
 161 xorq %r10, %r10
162 xorq %r11, %r11
163 xorq %r12, %r12
164 xorq %r13, %r13
165 xorq %r14, %r14
166 xorq %r15, %r15
167
168 ret
169
1701:
171 popq %rdx
172 leaq PAGE_SIZE(%r10), %rsp
173 call *%rdx
174
175 /* get the re-entry point of the peer system */
176 movq 0(%rsp), %rbp
177 call 1f
1781:
179 popq %r8
180 subq $(1b - relocate_kernel), %r8
181 movq CP_PA_SWAP_PAGE(%r8), %r10
182 movq CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
183 movq CP_PA_TABLE_PAGE(%r8), %rax
184 movq %rax, %cr3
185 lea PAGE_SIZE(%r8), %rsp
186 call swap_pages
187 movq $virtual_mapped, %rax
188 pushq %rax
189 ret
190
191virtual_mapped:
192 movq RSP(%r8), %rsp
193 movq CR4(%r8), %rax
194 movq %rax, %cr4
195 movq CR3(%r8), %rax
196 movq CR0(%r8), %r8
197 movq %rax, %cr3
198 movq %r8, %cr0
199 movq %rbp, %rax
200
201 popf
202 popq %r15
203 popq %r14
204 popq %r13
205 popq %r12
206 popq %rbp
207 popq %rbx
208 ret
201 209
202 /* Do the copies */ 210 /* Do the copies */
211swap_pages:
203 movq %rdi, %rcx /* Put the page_list in %rcx */ 212 movq %rdi, %rcx /* Put the page_list in %rcx */
204 xorq %rdi, %rdi 213 xorq %rdi, %rdi
205 xorq %rsi, %rsi 214 xorq %rsi, %rsi
@@ -231,36 +240,27 @@ identity_mapped:
 231 movq %rcx, %rsi /* For every source page do a copy */ 240 movq %rcx, %rsi /* For every source page do a copy */
232 andq $0xfffffffffffff000, %rsi 241 andq $0xfffffffffffff000, %rsi
233 242
243 movq %rdi, %rdx
244 movq %rsi, %rax
245
246 movq %r10, %rdi
234 movq $512, %rcx 247 movq $512, %rcx
235 rep ; movsq 248 rep ; movsq
236 jmp 0b
2373:
238
239 /* To be certain of avoiding problems with self-modifying code
240 * I need to execute a serializing instruction here.
241 * So I flush the TLB by reloading %cr3 here, it's handy,
242 * and not processor dependent.
243 */
244 movq %cr3, %rax
245 movq %rax, %cr3
246 249
247 /* set all of the registers to known values */ 250 movq %rax, %rdi
248 /* leave %rsp alone */ 251 movq %rdx, %rsi
252 movq $512, %rcx
253 rep ; movsq
249 254
250 xorq %rax, %rax 255 movq %rdx, %rdi
251 xorq %rbx, %rbx 256 movq %r10, %rsi
252 xorq %rcx, %rcx 257 movq $512, %rcx
253 xorq %rdx, %rdx 258 rep ; movsq
254 xorq %rsi, %rsi
255 xorq %rdi, %rdi
256 xorq %rbp, %rbp
257 xorq %r8, %r8
258 xorq %r9, %r9
259 xorq %r10, %r9
260 xorq %r11, %r11
261 xorq %r12, %r12
262 xorq %r13, %r13
263 xorq %r14, %r14
264 xorq %r15, %r15
265 259
260 lea PAGE_SIZE(%rax), %rsi
261 jmp 0b
2623:
266 ret 263 ret
264
265 .globl kexec_control_code_size
266.set kexec_control_code_size, . - relocate_kernel
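
The relocate_kernel_64.S rewrite drops the hand-built four-level page-table walk (the identity mapping is now prepared in C before jumping here) and adds preserve_context support: minimal CPU state is stashed at fixed DATA() offsets inside the control page so the old kernel can be re-entered later. The new swap_pages loop exchanges each destination page with its source through the dedicated swap page, three "rep ; movsq" copies per page. The same dance in plain, runnable C (names hypothetical):

    #include <string.h>

    #define PAGE_SIZE 4096

    /*
     * Exchange a destination page with a source page via a scratch
     * page, mirroring the three copies in swap_pages: the source lands
     * in the destination, and the destination's previous contents
     * survive in the source page, which is the backup the jump-back
     * path relies on.
     */
    static void swap_one_page(unsigned char *dst, unsigned char *src,
                              unsigned char *scratch)
    {
            memcpy(scratch, src, PAGE_SIZE);  /* stash the source page */
            memcpy(src, dst, PAGE_SIZE);      /* back up old destination */
            memcpy(dst, scratch, PAGE_SIZE);  /* install the new page */
    }

    int main(void)
    {
            static unsigned char a[PAGE_SIZE], b[PAGE_SIZE], tmp[PAGE_SIZE];

            a[0] = 1;
            b[0] = 2;
            swap_one_page(a, b, tmp);
            return (a[0] == 2 && b[0] == 1) ? 0 : 1;
    }
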
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 6a8811a6932..f28c56e6bf9 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -74,14 +74,15 @@
74#include <asm/e820.h> 74#include <asm/e820.h>
75#include <asm/mpspec.h> 75#include <asm/mpspec.h>
76#include <asm/setup.h> 76#include <asm/setup.h>
77#include <asm/arch_hooks.h>
78#include <asm/efi.h> 77#include <asm/efi.h>
78#include <asm/timer.h>
79#include <asm/i8259.h>
79#include <asm/sections.h> 80#include <asm/sections.h>
80#include <asm/dmi.h> 81#include <asm/dmi.h>
81#include <asm/io_apic.h> 82#include <asm/io_apic.h>
82#include <asm/ist.h> 83#include <asm/ist.h>
83#include <asm/vmi.h> 84#include <asm/vmi.h>
84#include <setup_arch.h> 85#include <asm/setup_arch.h>
85#include <asm/bios_ebda.h> 86#include <asm/bios_ebda.h>
86#include <asm/cacheflush.h> 87#include <asm/cacheflush.h>
87#include <asm/processor.h> 88#include <asm/processor.h>
@@ -89,7 +90,7 @@
89 90
90#include <asm/system.h> 91#include <asm/system.h>
91#include <asm/vsyscall.h> 92#include <asm/vsyscall.h>
92#include <asm/smp.h> 93#include <asm/cpu.h>
93#include <asm/desc.h> 94#include <asm/desc.h>
94#include <asm/dma.h> 95#include <asm/dma.h>
95#include <asm/iommu.h> 96#include <asm/iommu.h>
@@ -97,7 +98,6 @@
97#include <asm/mmu_context.h> 98#include <asm/mmu_context.h>
98#include <asm/proto.h> 99#include <asm/proto.h>
99 100
100#include <mach_apic.h>
101#include <asm/paravirt.h> 101#include <asm/paravirt.h>
102#include <asm/hypervisor.h> 102#include <asm/hypervisor.h>
103 103
@@ -112,6 +112,20 @@
112#define ARCH_SETUP 112#define ARCH_SETUP
113#endif 113#endif
114 114
115unsigned int boot_cpu_id __read_mostly;
116
117#ifdef CONFIG_X86_64
118int default_cpu_present_to_apicid(int mps_cpu)
119{
120 return __default_cpu_present_to_apicid(mps_cpu);
121}
122
123int default_check_phys_apicid_present(int boot_cpu_physical_apicid)
124{
125 return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
126}
127#endif
128
115#ifndef CONFIG_DEBUG_BOOT_PARAMS 129#ifndef CONFIG_DEBUG_BOOT_PARAMS
116struct boot_params __initdata boot_params; 130struct boot_params __initdata boot_params;
117#else 131#else
@@ -188,7 +202,9 @@ struct ist_info ist_info;
188#endif 202#endif
189 203
190#else 204#else
191struct cpuinfo_x86 boot_cpu_data __read_mostly; 205struct cpuinfo_x86 boot_cpu_data __read_mostly = {
206 .x86_phys_bits = MAX_PHYSMEM_BITS,
207};
192EXPORT_SYMBOL(boot_cpu_data); 208EXPORT_SYMBOL(boot_cpu_data);
193#endif 209#endif
194 210
@@ -586,20 +602,7 @@ static int __init setup_elfcorehdr(char *arg)
586early_param("elfcorehdr", setup_elfcorehdr); 602early_param("elfcorehdr", setup_elfcorehdr);
587#endif 603#endif
588 604
589static int __init default_update_genapic(void) 605static struct x86_quirks default_x86_quirks __initdata;
590{
591#ifdef CONFIG_X86_SMP
592# if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64)
593 genapic->wakeup_cpu = wakeup_secondary_cpu_via_init;
594# endif
595#endif
596
597 return 0;
598}
599
600static struct x86_quirks default_x86_quirks __initdata = {
601 .update_genapic = default_update_genapic,
602};
603 606
604struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; 607struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
605 608
@@ -656,7 +659,6 @@ void __init setup_arch(char **cmdline_p)
656#ifdef CONFIG_X86_32 659#ifdef CONFIG_X86_32
657 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); 660 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
658 visws_early_detect(); 661 visws_early_detect();
659 pre_setup_arch_hook();
660#else 662#else
661 printk(KERN_INFO "Command line: %s\n", boot_command_line); 663 printk(KERN_INFO "Command line: %s\n", boot_command_line);
662#endif 664#endif
@@ -824,8 +826,7 @@ void __init setup_arch(char **cmdline_p)
824#else 826#else
825 num_physpages = max_pfn; 827 num_physpages = max_pfn;
826 828
827 if (cpu_has_x2apic) 829 check_x2apic();
828 check_x2apic();
829 830
830 /* How many end-of-memory variables you have, grandma! */ 831 /* How many end-of-memory variables you have, grandma! */
831 /* need this before calling reserve_initrd */ 832 /* need this before calling reserve_initrd */
@@ -865,9 +866,7 @@ void __init setup_arch(char **cmdline_p)
865 866
866 reserve_initrd(); 867 reserve_initrd();
867 868
868#ifdef CONFIG_X86_64
869 vsmp_init(); 869 vsmp_init();
870#endif
871 870
872 io_delay_init(); 871 io_delay_init();
873 872
@@ -893,12 +892,11 @@ void __init setup_arch(char **cmdline_p)
893 */ 892 */
894 acpi_reserve_bootmem(); 893 acpi_reserve_bootmem();
895#endif 894#endif
896#ifdef CONFIG_X86_FIND_SMP_CONFIG
897 /* 895 /*
898 * Find and reserve possible boot-time SMP configuration: 896 * Find and reserve possible boot-time SMP configuration:
899 */ 897 */
900 find_smp_config(); 898 find_smp_config();
901#endif 899
902 reserve_crashkernel(); 900 reserve_crashkernel();
903 901
904#ifdef CONFIG_X86_64 902#ifdef CONFIG_X86_64
@@ -925,9 +923,7 @@ void __init setup_arch(char **cmdline_p)
925 map_vsyscall(); 923 map_vsyscall();
926#endif 924#endif
927 925
928#ifdef CONFIG_X86_GENERICARCH
929 generic_apic_probe(); 926 generic_apic_probe();
930#endif
931 927
932 early_quirks(); 928 early_quirks();
933 929
@@ -978,4 +974,95 @@ void __init setup_arch(char **cmdline_p)
978#endif 974#endif
979} 975}
980 976
977#ifdef CONFIG_X86_32
978
979/**
980 * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
981 *
982 * Description:
983 * Perform any necessary interrupt initialisation prior to setting up
984 * the "ordinary" interrupt call gates. For legacy reasons, the ISA
985 * interrupts should be initialised here if the machine emulates a PC
986 * in any way.
987 **/
988void __init x86_quirk_pre_intr_init(void)
989{
990 if (x86_quirks->arch_pre_intr_init) {
991 if (x86_quirks->arch_pre_intr_init())
992 return;
993 }
994 init_ISA_irqs();
995}
996
997/**
998 * x86_quirk_intr_init - post gate setup interrupt initialisation
999 *
1000 * Description:
1001 * Fill in any interrupts that may have been left out by the general
1002 * init_IRQ() routine. interrupts having to do with the machine rather
1003 * than the devices on the I/O bus (like APIC interrupts in intel MP
1004 * systems) are started here.
1005 **/
1006void __init x86_quirk_intr_init(void)
1007{
1008 if (x86_quirks->arch_intr_init) {
1009 if (x86_quirks->arch_intr_init())
1010 return;
1011 }
1012}
1013
1014/**
1015 * x86_quirk_trap_init - initialise system specific traps
1016 *
1017 * Description:
1018 * Called as the final act of trap_init(). Used in VISWS to initialise
1019 * the various board specific APIC traps.
1020 **/
1021void __init x86_quirk_trap_init(void)
1022{
1023 if (x86_quirks->arch_trap_init) {
1024 if (x86_quirks->arch_trap_init())
1025 return;
1026 }
1027}
1028
1029static struct irqaction irq0 = {
1030 .handler = timer_interrupt,
1031 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
1032 .mask = CPU_MASK_NONE,
1033 .name = "timer"
1034};
1035
1036/**
1037 * x86_quirk_pre_time_init - do any specific initialisations before.
1038 *
1039 **/
1040void __init x86_quirk_pre_time_init(void)
1041{
1042 if (x86_quirks->arch_pre_time_init)
1043 x86_quirks->arch_pre_time_init();
1044}
981 1045
1046/**
1047 * x86_quirk_time_init - do any specific initialisations for the system timer.
1048 *
1049 * Description:
1050 * Must plug the system timer interrupt source at HZ into the IRQ listed
1051 * in irq_vectors.h:TIMER_IRQ
1052 **/
1053void __init x86_quirk_time_init(void)
1054{
1055 if (x86_quirks->arch_time_init) {
1056 /*
1057 * A nonzero return code does not mean failure, it means
1058 * that the architecture quirk does not want any
1059 * generic (timer) setup to be performed after this:
1060 */
1061 if (x86_quirks->arch_time_init())
1062 return;
1063 }
1064
1065 irq0.mask = cpumask_of_cpu(0);
1066 setup_irq(0, &irq0);
1067}
1068#endif /* CONFIG_X86_32 */
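
The new x86_quirk_*() helpers in setup.c all follow one optional-hook convention: call the callback if the platform installed one, and treat a nonzero return as "the quirk handled everything, skip the generic path". Condensed into a single runnable illustration (all names hypothetical):

    #include <stdio.h>

    struct quirks {
            int (*pre_intr_init)(void);     /* optional platform hook */
    };

    static int my_board_pre_intr_init(void)
    {
            puts("board quirk ran");
            return 1;                       /* nonzero: suppress generic setup */
    }

    static struct quirks quirks = { .pre_intr_init = my_board_pre_intr_init };

    static void quirk_pre_intr_init(void)
    {
            if (quirks.pre_intr_init && quirks.pre_intr_init())
                    return;
            puts("generic ISA irq setup");
    }

    int main(void)
    {
            quirk_pre_intr_init();
            return 0;
    }
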
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 01161077a49..400331b50a5 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -7,402 +7,439 @@
7#include <linux/crash_dump.h> 7#include <linux/crash_dump.h>
8#include <linux/smp.h> 8#include <linux/smp.h>
9#include <linux/topology.h> 9#include <linux/topology.h>
10#include <linux/pfn.h>
10#include <asm/sections.h> 11#include <asm/sections.h>
11#include <asm/processor.h> 12#include <asm/processor.h>
12#include <asm/setup.h> 13#include <asm/setup.h>
13#include <asm/mpspec.h> 14#include <asm/mpspec.h>
14#include <asm/apicdef.h> 15#include <asm/apicdef.h>
15#include <asm/highmem.h> 16#include <asm/highmem.h>
17#include <asm/proto.h>
18#include <asm/cpumask.h>
19#include <asm/cpu.h>
20#include <asm/stackprotector.h>
16 21
17#ifdef CONFIG_X86_LOCAL_APIC 22#ifdef CONFIG_DEBUG_PER_CPU_MAPS
18unsigned int num_processors; 23# define DBG(x...) printk(KERN_DEBUG x)
19unsigned disabled_cpus __cpuinitdata; 24#else
20/* Processor that is doing the boot up */ 25# define DBG(x...)
21unsigned int boot_cpu_physical_apicid = -1U;
22EXPORT_SYMBOL(boot_cpu_physical_apicid);
23unsigned int max_physical_apicid;
24
25/* Bitmask of physically existing CPUs */
26physid_mask_t phys_cpu_present_map;
27#endif 26#endif
28 27
29/* map cpu index to physical APIC ID */ 28DEFINE_PER_CPU(int, cpu_number);
30DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID); 29EXPORT_PER_CPU_SYMBOL(cpu_number);
31DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
32EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
33EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
34
35#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
36#define X86_64_NUMA 1
37 30
38/* map cpu index to node index */ 31#ifdef CONFIG_X86_64
39DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); 32#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
40EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); 33#else
34#define BOOT_PERCPU_OFFSET 0
35#endif
41 36
42/* which logical CPUs are on which nodes */ 37DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
43cpumask_t *node_to_cpumask_map; 38EXPORT_PER_CPU_SYMBOL(this_cpu_off);
44EXPORT_SYMBOL(node_to_cpumask_map);
45 39
46/* setup node_to_cpumask_map */ 40unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
47static void __init setup_node_to_cpumask_map(void); 41 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
42};
43EXPORT_SYMBOL(__per_cpu_offset);
48 44
45/*
46 * On x86_64 symbols referenced from code should be reachable using
47 * 32bit relocations. Reserve space for static percpu variables in
48 * modules so that they are always served from the first chunk which
49 * is located at the percpu segment base. On x86_32, anything can
50 * address anywhere. No need to reserve space in the first chunk.
51 */
52#ifdef CONFIG_X86_64
53#define PERCPU_FIRST_CHUNK_RESERVE PERCPU_MODULE_RESERVE
49#else 54#else
50static inline void setup_node_to_cpumask_map(void) { } 55#define PERCPU_FIRST_CHUNK_RESERVE 0
51#endif 56#endif
52 57
53#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP) 58/**
54/* 59 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
55 * Copy data used in early init routines from the initial arrays to the 60 *
56 * per cpu data areas. These arrays then become expendable and the 61 * If NUMA is not configured or there is only one NUMA node available,
57 * *_early_ptr's are zeroed indicating that the static arrays are gone. 62 * there is no reason to consider NUMA. This function determines
63 * whether percpu allocation should consider NUMA or not.
64 *
65 * RETURNS:
66 * true if NUMA should be considered; otherwise, false.
58 */ 67 */
59static void __init setup_per_cpu_maps(void) 68static bool __init pcpu_need_numa(void)
60{ 69{
61 int cpu; 70#ifdef CONFIG_NEED_MULTIPLE_NODES
71 pg_data_t *last = NULL;
72 unsigned int cpu;
62 73
63 for_each_possible_cpu(cpu) { 74 for_each_possible_cpu(cpu) {
64 per_cpu(x86_cpu_to_apicid, cpu) = 75 int node = early_cpu_to_node(cpu);
65 early_per_cpu_map(x86_cpu_to_apicid, cpu);
66 per_cpu(x86_bios_cpu_apicid, cpu) =
67 early_per_cpu_map(x86_bios_cpu_apicid, cpu);
68#ifdef X86_64_NUMA
69 per_cpu(x86_cpu_to_node_map, cpu) =
70 early_per_cpu_map(x86_cpu_to_node_map, cpu);
71#endif
72 }
73 76
74 /* indicate the early static arrays will soon be gone */ 77 if (node_online(node) && NODE_DATA(node) &&
75 early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; 78 last && last != NODE_DATA(node))
76 early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; 79 return true;
77#ifdef X86_64_NUMA 80
78 early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; 81 last = NODE_DATA(node);
82 }
79#endif 83#endif
84 return false;
80} 85}
81 86
82#ifdef CONFIG_X86_32 87/**
83/* 88 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
84 * Great future not-so-futuristic plan: make i386 and x86_64 do it 89 * @cpu: cpu to allocate for
85 * the same way 90 * @size: size allocation in bytes
86 */ 91 * @align: alignment
87unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 92 *
88EXPORT_SYMBOL(__per_cpu_offset); 93 * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
89static inline void setup_cpu_pda_map(void) { } 94 * does the right thing for NUMA regardless of the current
90 95 * configuration.
91#elif !defined(CONFIG_SMP) 96 *
92static inline void setup_cpu_pda_map(void) { } 97 * RETURNS:
93 98 * Pointer to the allocated area on success, NULL on failure.
94#else /* CONFIG_SMP && CONFIG_X86_64 */
95
96/*
97 * Allocate cpu_pda pointer table and array via alloc_bootmem.
98 */ 99 */
99static void __init setup_cpu_pda_map(void) 100static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
101 unsigned long align)
100{ 102{
101 char *pda; 103 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
102 struct x8664_pda **new_cpu_pda; 104#ifdef CONFIG_NEED_MULTIPLE_NODES
103 unsigned long size; 105 int node = early_cpu_to_node(cpu);
104 int cpu; 106 void *ptr;
105 107
106 size = roundup(sizeof(struct x8664_pda), cache_line_size()); 108 if (!node_online(node) || !NODE_DATA(node)) {
107 109 ptr = __alloc_bootmem_nopanic(size, align, goal);
108 /* allocate cpu_pda array and pointer table */ 110 pr_info("cpu %d has no node %d or node-local memory\n",
109 { 111 cpu, node);
110 unsigned long tsize = nr_cpu_ids * sizeof(void *); 112 pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
111 unsigned long asize = size * (nr_cpu_ids - 1); 113 cpu, size, __pa(ptr));
112 114 } else {
113 tsize = roundup(tsize, cache_line_size()); 115 ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
114 new_cpu_pda = alloc_bootmem(tsize + asize); 116 size, align, goal);
115 pda = (char *)new_cpu_pda + tsize; 117 pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
116 } 118 "%016lx\n", cpu, size, node, __pa(ptr));
117
118 /* initialize pointer table to static pda's */
119 for_each_possible_cpu(cpu) {
120 if (cpu == 0) {
121 /* leave boot cpu pda in place */
122 new_cpu_pda[0] = cpu_pda(0);
123 continue;
124 }
125 new_cpu_pda[cpu] = (struct x8664_pda *)pda;
126 new_cpu_pda[cpu]->in_bootmem = 1;
127 pda += size;
128 } 119 }
129 120 return ptr;
130 /* point to new pointer table */ 121#else
131 _cpu_pda = new_cpu_pda; 122 return __alloc_bootmem_nopanic(size, align, goal);
123#endif
132} 124}
133 125
134#endif /* CONFIG_SMP && CONFIG_X86_64 */ 126/*
135 127 * Remap allocator
136#ifdef CONFIG_X86_64 128 *
129 * This allocator uses PMD page as unit. A PMD page is allocated for
130 * each cpu and each is remapped into vmalloc area using PMD mapping.
131 * As PMD page is quite large, only part of it is used for the first
132 * chunk. Unused part is returned to the bootmem allocator.
133 *
134 * So, the PMD pages are mapped twice - once to the physical mapping
135 * and to the vmalloc area for the first percpu chunk. The double
136 * mapping does add one more PMD TLB entry pressure but still is much
137 * better than only using 4k mappings while still being NUMA friendly.
138 */
139#ifdef CONFIG_NEED_MULTIPLE_NODES
140static size_t pcpur_size __initdata;
141static void **pcpur_ptrs __initdata;
137 142
138/* correctly size the local cpu masks */ 143static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
139static void __init setup_cpu_local_masks(void)
140{ 144{
141 alloc_bootmem_cpumask_var(&cpu_initialized_mask); 145 size_t off = (size_t)pageno << PAGE_SHIFT;
142 alloc_bootmem_cpumask_var(&cpu_callin_mask);
143 alloc_bootmem_cpumask_var(&cpu_callout_mask);
144 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
145}
146 146
147#else /* CONFIG_X86_32 */ 147 if (off >= pcpur_size)
148 return NULL;
148 149
149static inline void setup_cpu_local_masks(void) 150 return virt_to_page(pcpur_ptrs[cpu] + off);
150{
151} 151}
152 152
153#endif /* CONFIG_X86_32 */ 153static ssize_t __init setup_pcpu_remap(size_t static_size)
154
155/*
156 * Great future plan:
157 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
158 * Always point %gs to its beginning
159 */
160void __init setup_per_cpu_areas(void)
161{ 154{
162 ssize_t size, old_size; 155 static struct vm_struct vm;
163 char *ptr; 156 pg_data_t *last;
164 int cpu; 157 size_t ptrs_size, dyn_size;
165 unsigned long align = 1; 158 unsigned int cpu;
166 159 ssize_t ret;
167 /* Setup cpu_pda map */ 160
168 setup_cpu_pda_map(); 161 /*
162 * If large page isn't supported, there's no benefit in doing
163 * this. Also, on non-NUMA, embedding is better.
164 */
165 if (!cpu_has_pse || pcpu_need_numa())
166 return -EINVAL;
167
168 last = NULL;
169 for_each_possible_cpu(cpu) {
170 int node = early_cpu_to_node(cpu);
169 171
170 /* Copy section for each CPU (we discard the original) */ 172 if (node_online(node) && NODE_DATA(node) &&
171 old_size = PERCPU_ENOUGH_ROOM; 173 last && last != NODE_DATA(node))
172 align = max_t(unsigned long, PAGE_SIZE, align); 174 goto proceed;
173 size = roundup(old_size, align);
174 175
175 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", 176 last = NODE_DATA(node);
176 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); 177 }
178 return -EINVAL;
179
180proceed:
181 /*
182 * Currently supports only single page. Supporting multiple
183 * pages won't be too difficult if it ever becomes necessary.
184 */
185 pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
186 PERCPU_DYNAMIC_RESERVE);
187 if (pcpur_size > PMD_SIZE) {
188 pr_warning("PERCPU: static data is larger than large page, "
189 "can't use large page\n");
190 return -EINVAL;
191 }
192 dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
177 193
178 pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size); 194 /* allocate pointer array and alloc large pages */
195 ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
196 pcpur_ptrs = alloc_bootmem(ptrs_size);
179 197
180 for_each_possible_cpu(cpu) { 198 for_each_possible_cpu(cpu) {
181#ifndef CONFIG_NEED_MULTIPLE_NODES 199 pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
182 ptr = __alloc_bootmem(size, align, 200 if (!pcpur_ptrs[cpu])
183 __pa(MAX_DMA_ADDRESS)); 201 goto enomem;
184#else 202
185 int node = early_cpu_to_node(cpu); 203 /*
186 if (!node_online(node) || !NODE_DATA(node)) { 204 * Only use pcpur_size bytes and give back the rest.
187 ptr = __alloc_bootmem(size, align, 205 *
188 __pa(MAX_DMA_ADDRESS)); 206 * Ingo: The 2MB up-rounding bootmem is needed to make
189 pr_info("cpu %d has no node %d or node-local memory\n", 207 * sure the partial 2MB page is still fully RAM - it's
190 cpu, node); 208 * not well-specified to have a PAT-incompatible area
191 pr_debug("per cpu data for cpu%d at %016lx\n", 209 * (unmapped RAM, device memory, etc.) in that hole.
192 cpu, __pa(ptr)); 210 */
193 } else { 211 free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
194 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 212 PMD_SIZE - pcpur_size);
195 __pa(MAX_DMA_ADDRESS)); 213
196 pr_debug("per cpu data for cpu%d on node%d at %016lx\n", 214 memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
197 cpu, node, __pa(ptr));
198 }
199#endif
200 per_cpu_offset(cpu) = ptr - __per_cpu_start;
201 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
202 } 215 }
203 216
204 /* Setup percpu data maps */ 217 /* allocate address and map */
205 setup_per_cpu_maps(); 218 vm.flags = VM_ALLOC;
206 219 vm.size = num_possible_cpus() * PMD_SIZE;
207 /* Setup node to cpumask map */ 220 vm_area_register_early(&vm, PMD_SIZE);
208 setup_node_to_cpumask_map();
209
210 /* Setup cpu initialized, callin, callout masks */
211 setup_cpu_local_masks();
212}
213
214#endif
215 221
216#ifdef X86_64_NUMA 222 for_each_possible_cpu(cpu) {
223 pmd_t *pmd;
217 224
218/* 225 pmd = populate_extra_pmd((unsigned long)vm.addr
219 * Allocate node_to_cpumask_map based on number of available nodes 226 + cpu * PMD_SIZE);
220 * Requires node_possible_map to be valid. 227 set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
221 * 228 PAGE_KERNEL_LARGE));
222 * Note: node_to_cpumask() is not valid until after this is done.
223 */
224static void __init setup_node_to_cpumask_map(void)
225{
226 unsigned int node, num = 0;
227 cpumask_t *map;
228
229 /* setup nr_node_ids if not done yet */
230 if (nr_node_ids == MAX_NUMNODES) {
231 for_each_node_mask(node, node_possible_map)
232 num = node;
233 nr_node_ids = num + 1;
234 } 229 }
235 230
236 /* allocate the map */ 231 /* we're ready, commit */
237 map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t)); 232 pr_info("PERCPU: Remapped at %p with large pages, static data "
238 233 "%zu bytes\n", vm.addr, static_size);
239 pr_debug("Node to cpumask map at %p for %d nodes\n", 234
240 map, nr_node_ids); 235 ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
241 236 PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
242 /* node_to_cpumask() will now work */ 237 PMD_SIZE, vm.addr, NULL);
243 node_to_cpumask_map = map; 238 goto out_free_ar;
239
240enomem:
241 for_each_possible_cpu(cpu)
242 if (pcpur_ptrs[cpu])
243 free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
244 ret = -ENOMEM;
245out_free_ar:
246 free_bootmem(__pa(pcpur_ptrs), ptrs_size);
247 return ret;
244} 248}
245 249#else
246void __cpuinit numa_set_node(int cpu, int node) 250static ssize_t __init setup_pcpu_remap(size_t static_size)
247{ 251{
248 int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); 252 return -EINVAL;
249
250 if (cpu_pda(cpu) && node != NUMA_NO_NODE)
251 cpu_pda(cpu)->nodenumber = node;
252
253 if (cpu_to_node_map)
254 cpu_to_node_map[cpu] = node;
255
256 else if (per_cpu_offset(cpu))
257 per_cpu(x86_cpu_to_node_map, cpu) = node;
258
259 else
260 pr_debug("Setting node for non-present cpu %d\n", cpu);
261} 253}
254#endif
262 255
263void __cpuinit numa_clear_node(int cpu) 256/*
257 * Embedding allocator
258 *
259 * The first chunk is sized to just contain the static area plus
260 * module and dynamic reserves and embedded into linear physical
261 * mapping so that it can use PMD mapping without additional TLB
262 * pressure.
263 */
264static ssize_t __init setup_pcpu_embed(size_t static_size)
264{ 265{
265 numa_set_node(cpu, NUMA_NO_NODE); 266 size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
267
268 /*
269 * If large page isn't supported, there's no benefit in doing
270 * this. Also, embedding allocation doesn't play well with
271 * NUMA.
272 */
273 if (!cpu_has_pse || pcpu_need_numa())
274 return -EINVAL;
275
276 return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
277 reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
266} 278}
267 279
268#ifndef CONFIG_DEBUG_PER_CPU_MAPS 280/*
281 * 4k page allocator
282 *
283 * This is the basic allocator. Static percpu area is allocated
284 * page-by-page and most of initialization is done by the generic
285 * setup function.
286 */
287static struct page **pcpu4k_pages __initdata;
288static int pcpu4k_nr_static_pages __initdata;
269 289
270void __cpuinit numa_add_cpu(int cpu) 290static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
271{ 291{
272 cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); 292 if (pageno < pcpu4k_nr_static_pages)
293 return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
294 return NULL;
273} 295}
274 296
275void __cpuinit numa_remove_cpu(int cpu) 297static void __init pcpu4k_populate_pte(unsigned long addr)
276{ 298{
277 cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]); 299 populate_extra_pte(addr);
278} 300}
279 301
280#else /* CONFIG_DEBUG_PER_CPU_MAPS */ 302static ssize_t __init setup_pcpu_4k(size_t static_size)
281
282/*
283 * --------- debug versions of the numa functions ---------
284 */
285static void __cpuinit numa_set_cpumask(int cpu, int enable)
286{ 303{
287 int node = cpu_to_node(cpu); 304 size_t pages_size;
288 cpumask_t *mask; 305 unsigned int cpu;
289 char buf[64]; 306 int i, j;
290 307 ssize_t ret;
291 if (node_to_cpumask_map == NULL) { 308
292 printk(KERN_ERR "node_to_cpumask_map NULL\n"); 309 pcpu4k_nr_static_pages = PFN_UP(static_size);
293 dump_stack(); 310
294 return; 311 /* unaligned allocations can't be freed, round up to page size */
295 } 312 pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
296 313 * sizeof(pcpu4k_pages[0]));
297 mask = &node_to_cpumask_map[node]; 314 pcpu4k_pages = alloc_bootmem(pages_size);
298 if (enable) 315
299 cpu_set(cpu, *mask); 316 /* allocate and copy */
300 else 317 j = 0;
301 cpu_clear(cpu, *mask); 318 for_each_possible_cpu(cpu)
319 for (i = 0; i < pcpu4k_nr_static_pages; i++) {
320 void *ptr;
321
322 ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
323 if (!ptr)
324 goto enomem;
325
326 memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
327 pcpu4k_pages[j++] = virt_to_page(ptr);
328 }
302 329
303 cpulist_scnprintf(buf, sizeof(buf), mask); 330 /* we're ready, commit */
304 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", 331 pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
305 enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf); 332 pcpu4k_nr_static_pages, static_size);
333
334 ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
335 PERCPU_FIRST_CHUNK_RESERVE, -1,
336 -1, NULL, pcpu4k_populate_pte);
337 goto out_free_ar;
338
339enomem:
340 while (--j >= 0)
341 free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
342 ret = -ENOMEM;
343out_free_ar:
344 free_bootmem(__pa(pcpu4k_pages), pages_size);
345 return ret;
306} 346}
307 347
308void __cpuinit numa_add_cpu(int cpu) 348static inline void setup_percpu_segment(int cpu)
309{ 349{
310 numa_set_cpumask(cpu, 1); 350#ifdef CONFIG_X86_32
311} 351 struct desc_struct gdt;
312
313void __cpuinit numa_remove_cpu(int cpu)
314{
315 numa_set_cpumask(cpu, 0);
316}
317 352
318int cpu_to_node(int cpu) 353 pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
319{ 354 0x2 | DESCTYPE_S, 0x8);
320 if (early_per_cpu_ptr(x86_cpu_to_node_map)) { 355 gdt.s = 1;
321 printk(KERN_WARNING 356 write_gdt_entry(get_cpu_gdt_table(cpu),
322 "cpu_to_node(%d): usage too early!\n", cpu); 357 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
323 dump_stack(); 358#endif
324 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
325 }
326 return per_cpu(x86_cpu_to_node_map, cpu);
327} 359}
328EXPORT_SYMBOL(cpu_to_node);
329 360
330/* 361/*
331 * Same function as cpu_to_node() but used if called before the 362 * Great future plan:
332 * per_cpu areas are setup. 363 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
364 * Always point %gs to its beginning
333 */ 365 */
334int early_cpu_to_node(int cpu) 366void __init setup_per_cpu_areas(void)
335{ 367{
336 if (early_per_cpu_ptr(x86_cpu_to_node_map)) 368 size_t static_size = __per_cpu_end - __per_cpu_start;
337 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; 369 unsigned int cpu;
338 370 unsigned long delta;
339 if (!per_cpu_offset(cpu)) { 371 size_t pcpu_unit_size;
340 printk(KERN_WARNING 372 ssize_t ret;
341 "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
342 dump_stack();
343 return NUMA_NO_NODE;
344 }
345 return per_cpu(x86_cpu_to_node_map, cpu);
346}
347 373
374 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
375 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
348 376
349/* empty cpumask */ 377 /*
350static const cpumask_t cpu_mask_none; 378 * Allocate percpu area. If PSE is supported, try to make use
351 379 * of large page mappings. Please read comments on top of
352/* 380 * each allocator for details.
353 * Returns a pointer to the bitmask of CPUs on Node 'node'. 381 */
354 */ 382 ret = setup_pcpu_remap(static_size);
355const cpumask_t *cpumask_of_node(int node) 383 if (ret < 0)
356{ 384 ret = setup_pcpu_embed(static_size);
357 if (node_to_cpumask_map == NULL) { 385 if (ret < 0)
358 printk(KERN_WARNING 386 ret = setup_pcpu_4k(static_size);
359 "cpumask_of_node(%d): no node_to_cpumask_map!\n", 387 if (ret < 0)
360 node); 388 panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
361 dump_stack(); 389 static_size, ret);
362 return (const cpumask_t *)&cpu_online_map; 390
363 } 391 pcpu_unit_size = ret;
364 if (node >= nr_node_ids) { 392
365 printk(KERN_WARNING 393 /* alrighty, percpu areas up and running */
366 "cpumask_of_node(%d): node > nr_node_ids(%d)\n", 394 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
367 node, nr_node_ids); 395 for_each_possible_cpu(cpu) {
368 dump_stack(); 396 per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
369 return &cpu_mask_none; 397 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
370 } 398 per_cpu(cpu_number, cpu) = cpu;
371 return &node_to_cpumask_map[node]; 399 setup_percpu_segment(cpu);
372} 400 setup_stack_canary_segment(cpu);
373EXPORT_SYMBOL(cpumask_of_node); 401 /*
374 402 * Copy data used in early init routines from the
375/* 403 * initial arrays to the per cpu data areas. These
376 * Returns a bitmask of CPUs on Node 'node'. 404 * arrays then become expendable and the *_early_ptr's
377 * 405 * are zeroed indicating that the static arrays are
378 * Side note: this function creates the returned cpumask on the stack 406 * gone.
379 * so with a high NR_CPUS count, excessive stack space is used. The 407 */
380 * node_to_cpumask_ptr function should be used whenever possible. 408#ifdef CONFIG_X86_LOCAL_APIC
381 */ 409 per_cpu(x86_cpu_to_apicid, cpu) =
382cpumask_t node_to_cpumask(int node) 410 early_per_cpu_map(x86_cpu_to_apicid, cpu);
383{ 411 per_cpu(x86_bios_cpu_apicid, cpu) =
384 if (node_to_cpumask_map == NULL) { 412 early_per_cpu_map(x86_bios_cpu_apicid, cpu);
385 printk(KERN_WARNING 413#endif
386 "node_to_cpumask(%d): no node_to_cpumask_map!\n", node); 414#ifdef CONFIG_X86_64
387 dump_stack(); 415 per_cpu(irq_stack_ptr, cpu) =
388 return cpu_online_map; 416 per_cpu(irq_stack_union.irq_stack, cpu) +
389 } 417 IRQ_STACK_SIZE - 64;
390 if (node >= nr_node_ids) { 418#ifdef CONFIG_NUMA
391 printk(KERN_WARNING 419 per_cpu(x86_cpu_to_node_map, cpu) =
392 "node_to_cpumask(%d): node > nr_node_ids(%d)\n", 420 early_per_cpu_map(x86_cpu_to_node_map, cpu);
393 node, nr_node_ids); 421#endif
394 dump_stack(); 422#endif
395 return cpu_mask_none; 423 /*
424 * Up to this point, the boot CPU has been using .data.init
425 * area. Reload any changed state for the boot CPU.
426 */
427 if (cpu == boot_cpu_id)
428 switch_to_new_gdt(cpu);
396 } 429 }
397 return node_to_cpumask_map[node];
398}
399EXPORT_SYMBOL(node_to_cpumask);
400 430
401/* 431 /* indicate the early static arrays will soon be gone */
402 * --------- end of debug versions of the numa functions --------- 432#ifdef CONFIG_X86_LOCAL_APIC
403 */ 433 early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
404 434 early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
405#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ 435#endif
436#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
437 early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
438#endif
406 439
407#endif /* X86_64_NUMA */ 440 /* Setup node to cpumask map */
441 setup_node_to_cpumask_map();
408 442
443 /* Setup cpu initialized, callin, callout masks */
444 setup_cpu_local_masks();
445}
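
setup_per_cpu_areas() now tries three first-chunk allocators in order of preference (large-page remap, then embed, then page-by-page 4k) and, whichever succeeds, finishes with the same bookkeeping: every cpu's offset is delta + cpu * pcpu_unit_size, where delta rebases the linker's percpu section addresses onto the freshly allocated first chunk. A runnable model of that address arithmetic (all values and names hypothetical):

    #include <assert.h>

    #define UNIT_SIZE 0x2000UL              /* stand-in for pcpu_unit_size */

    static unsigned long per_cpu_start = 0x1000;    /* __per_cpu_start */
    static unsigned long pcpu_base = 0x100000;      /* pcpu_base_addr */

    /* per_cpu_offset(cpu) = delta + cpu * unit, as in the hunk above */
    static unsigned long cpu_offset(int cpu)
    {
            unsigned long delta = pcpu_base - per_cpu_start;
            return delta + cpu * UNIT_SIZE;
    }

    int main(void)
    {
            unsigned long sym = 0x1040;     /* link address of a percpu var */

            /* cpu 2's copy sits two units into the first chunk */
            assert(sym + cpu_offset(2) == 0x100040 + 2 * UNIT_SIZE);
            return 0;
    }
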
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index df0587f24c5..d2cc6428c58 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -50,27 +50,23 @@
50# define FIX_EFLAGS __FIX_EFLAGS 50# define FIX_EFLAGS __FIX_EFLAGS
51#endif 51#endif
52 52
53#define COPY(x) { \ 53#define COPY(x) do { \
54 err |= __get_user(regs->x, &sc->x); \ 54 get_user_ex(regs->x, &sc->x); \
55} 55} while (0)
56 56
57#define COPY_SEG(seg) { \ 57#define GET_SEG(seg) ({ \
58 unsigned short tmp; \ 58 unsigned short tmp; \
59 err |= __get_user(tmp, &sc->seg); \ 59 get_user_ex(tmp, &sc->seg); \
60 regs->seg = tmp; \ 60 tmp; \
61} 61})
62 62
63#define COPY_SEG_CPL3(seg) { \ 63#define COPY_SEG(seg) do { \
64 unsigned short tmp; \ 64 regs->seg = GET_SEG(seg); \
65 err |= __get_user(tmp, &sc->seg); \ 65} while (0)
66 regs->seg = tmp | 3; \
67}
68 66
69#define GET_SEG(seg) { \ 67#define COPY_SEG_CPL3(seg) do { \
70 unsigned short tmp; \ 68 regs->seg = GET_SEG(seg) | 3; \
71 err |= __get_user(tmp, &sc->seg); \ 69} while (0)
72 loadsegment(seg, tmp); \
73}
74 70
75static int 71static int
76restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, 72restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
@@ -83,45 +79,49 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
83 /* Always make any pending restarted system calls return -EINTR */ 79 /* Always make any pending restarted system calls return -EINTR */
84 current_thread_info()->restart_block.fn = do_no_restart_syscall; 80 current_thread_info()->restart_block.fn = do_no_restart_syscall;
85 81
82 get_user_try {
83
86#ifdef CONFIG_X86_32 84#ifdef CONFIG_X86_32
87 GET_SEG(gs); 85 set_user_gs(regs, GET_SEG(gs));
88 COPY_SEG(fs); 86 COPY_SEG(fs);
89 COPY_SEG(es); 87 COPY_SEG(es);
90 COPY_SEG(ds); 88 COPY_SEG(ds);
91#endif /* CONFIG_X86_32 */ 89#endif /* CONFIG_X86_32 */
92 90
93 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); 91 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
94 COPY(dx); COPY(cx); COPY(ip); 92 COPY(dx); COPY(cx); COPY(ip);
95 93
96#ifdef CONFIG_X86_64 94#ifdef CONFIG_X86_64
97 COPY(r8); 95 COPY(r8);
98 COPY(r9); 96 COPY(r9);
99 COPY(r10); 97 COPY(r10);
100 COPY(r11); 98 COPY(r11);
101 COPY(r12); 99 COPY(r12);
102 COPY(r13); 100 COPY(r13);
103 COPY(r14); 101 COPY(r14);
104 COPY(r15); 102 COPY(r15);
105#endif /* CONFIG_X86_64 */ 103#endif /* CONFIG_X86_64 */
106 104
107#ifdef CONFIG_X86_32 105#ifdef CONFIG_X86_32
108 COPY_SEG_CPL3(cs); 106 COPY_SEG_CPL3(cs);
109 COPY_SEG_CPL3(ss); 107 COPY_SEG_CPL3(ss);
110#else /* !CONFIG_X86_32 */ 108#else /* !CONFIG_X86_32 */
111 /* Kernel saves and restores only the CS segment register on signals, 109 /* Kernel saves and restores only the CS segment register on signals,
112 * which is the bare minimum needed to allow mixed 32/64-bit code. 110 * which is the bare minimum needed to allow mixed 32/64-bit code.
113 * App's signal handler can save/restore other segments if needed. */ 111 * App's signal handler can save/restore other segments if needed. */
114 COPY_SEG_CPL3(cs); 112 COPY_SEG_CPL3(cs);
115#endif /* CONFIG_X86_32 */ 113#endif /* CONFIG_X86_32 */
116 114
117 err |= __get_user(tmpflags, &sc->flags); 115 get_user_ex(tmpflags, &sc->flags);
118 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); 116 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
119 regs->orig_ax = -1; /* disable syscall checks */ 117 regs->orig_ax = -1; /* disable syscall checks */
120 118
121 err |= __get_user(buf, &sc->fpstate); 119 get_user_ex(buf, &sc->fpstate);
122 err |= restore_i387_xstate(buf); 120 err |= restore_i387_xstate(buf);
121
122 get_user_ex(*pax, &sc->ax);
123 } get_user_catch(err);
123 124
124 err |= __get_user(*pax, &sc->ax);
125 return err; 125 return err;
126} 126}
127 127
@@ -131,57 +131,55 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
131{ 131{
132 int err = 0; 132 int err = 0;
133 133
134#ifdef CONFIG_X86_32 134 put_user_try {
135 {
136 unsigned int tmp;
137 135
138 savesegment(gs, tmp); 136#ifdef CONFIG_X86_32
139 err |= __put_user(tmp, (unsigned int __user *)&sc->gs); 137 put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs);
140 } 138 put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
141 err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs); 139 put_user_ex(regs->es, (unsigned int __user *)&sc->es);
142 err |= __put_user(regs->es, (unsigned int __user *)&sc->es); 140 put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
143 err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
144#endif /* CONFIG_X86_32 */ 141#endif /* CONFIG_X86_32 */
145 142
146 err |= __put_user(regs->di, &sc->di); 143 put_user_ex(regs->di, &sc->di);
147 err |= __put_user(regs->si, &sc->si); 144 put_user_ex(regs->si, &sc->si);
148 err |= __put_user(regs->bp, &sc->bp); 145 put_user_ex(regs->bp, &sc->bp);
149 err |= __put_user(regs->sp, &sc->sp); 146 put_user_ex(regs->sp, &sc->sp);
150 err |= __put_user(regs->bx, &sc->bx); 147 put_user_ex(regs->bx, &sc->bx);
151 err |= __put_user(regs->dx, &sc->dx); 148 put_user_ex(regs->dx, &sc->dx);
152 err |= __put_user(regs->cx, &sc->cx); 149 put_user_ex(regs->cx, &sc->cx);
153 err |= __put_user(regs->ax, &sc->ax); 150 put_user_ex(regs->ax, &sc->ax);
154#ifdef CONFIG_X86_64 151#ifdef CONFIG_X86_64
155 err |= __put_user(regs->r8, &sc->r8); 152 put_user_ex(regs->r8, &sc->r8);
156 err |= __put_user(regs->r9, &sc->r9); 153 put_user_ex(regs->r9, &sc->r9);
157 err |= __put_user(regs->r10, &sc->r10); 154 put_user_ex(regs->r10, &sc->r10);
158 err |= __put_user(regs->r11, &sc->r11); 155 put_user_ex(regs->r11, &sc->r11);
159 err |= __put_user(regs->r12, &sc->r12); 156 put_user_ex(regs->r12, &sc->r12);
160 err |= __put_user(regs->r13, &sc->r13); 157 put_user_ex(regs->r13, &sc->r13);
161 err |= __put_user(regs->r14, &sc->r14); 158 put_user_ex(regs->r14, &sc->r14);
162 err |= __put_user(regs->r15, &sc->r15); 159 put_user_ex(regs->r15, &sc->r15);
163#endif /* CONFIG_X86_64 */ 160#endif /* CONFIG_X86_64 */
164 161
165 err |= __put_user(current->thread.trap_no, &sc->trapno); 162 put_user_ex(current->thread.trap_no, &sc->trapno);
166 err |= __put_user(current->thread.error_code, &sc->err); 163 put_user_ex(current->thread.error_code, &sc->err);
167 err |= __put_user(regs->ip, &sc->ip); 164 put_user_ex(regs->ip, &sc->ip);
168#ifdef CONFIG_X86_32 165#ifdef CONFIG_X86_32
169 err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs); 166 put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
170 err |= __put_user(regs->flags, &sc->flags); 167 put_user_ex(regs->flags, &sc->flags);
171 err |= __put_user(regs->sp, &sc->sp_at_signal); 168 put_user_ex(regs->sp, &sc->sp_at_signal);
172 err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); 169 put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
173#else /* !CONFIG_X86_32 */ 170#else /* !CONFIG_X86_32 */
174 err |= __put_user(regs->flags, &sc->flags); 171 put_user_ex(regs->flags, &sc->flags);
175 err |= __put_user(regs->cs, &sc->cs); 172 put_user_ex(regs->cs, &sc->cs);
176 err |= __put_user(0, &sc->gs); 173 put_user_ex(0, &sc->gs);
177 err |= __put_user(0, &sc->fs); 174 put_user_ex(0, &sc->fs);
178#endif /* CONFIG_X86_32 */ 175#endif /* CONFIG_X86_32 */
179 176
180 err |= __put_user(fpstate, &sc->fpstate); 177 put_user_ex(fpstate, &sc->fpstate);
181 178
182 /* non-iBCS2 extensions.. */ 179 /* non-iBCS2 extensions.. */
183 err |= __put_user(mask, &sc->oldmask); 180 put_user_ex(mask, &sc->oldmask);
184 err |= __put_user(current->thread.cr2, &sc->cr2); 181 put_user_ex(current->thread.cr2, &sc->cr2);
182 } put_user_catch(err);
185 183
186 return err; 184 return err;
187} 185}
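
Both sigcontext hunks above replace chains of "err |= __get_user(...)" / "err |= __put_user(...)" with get_user_try/put_user_try regions. Inside such a region each get_user_ex()/put_user_ex() access is covered by an exception-table fixup, so the straight-line path carries no per-access error test; a faulting access records the failure and the closing *_catch(err) folds it into err. Conceptually the two shapes compare like this (sketch only; the real macros are built on asm exception tables, not on C control flow):

    /* before: every access tests and accumulates */
    err |= __get_user(regs->di, &sc->di);
    err |= __get_user(regs->si, &sc->si);

    /* after: one region, error collected once at the end */
    get_user_try {
            get_user_ex(regs->di, &sc->di);
            get_user_ex(regs->si, &sc->si);
    } get_user_catch(err);
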
@@ -189,40 +187,35 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
189/* 187/*
190 * Set up a signal frame. 188 * Set up a signal frame.
191 */ 189 */
192#ifdef CONFIG_X86_32
193static const struct {
194 u16 poplmovl;
195 u32 val;
196 u16 int80;
197} __attribute__((packed)) retcode = {
198 0xb858, /* popl %eax; movl $..., %eax */
199 __NR_sigreturn,
200 0x80cd, /* int $0x80 */
201};
202
203static const struct {
204 u8 movl;
205 u32 val;
206 u16 int80;
207 u8 pad;
208} __attribute__((packed)) rt_retcode = {
209 0xb8, /* movl $..., %eax */
210 __NR_rt_sigreturn,
211 0x80cd, /* int $0x80 */
212 0
213};
214 190
215/* 191/*
216 * Determine which stack to use.. 192 * Determine which stack to use..
217 */ 193 */
194static unsigned long align_sigframe(unsigned long sp)
195{
196#ifdef CONFIG_X86_32
197 /*
198 * Align the stack pointer according to the i386 ABI,
199 * i.e. so that on function entry ((sp + 4) & 15) == 0.
200 */
201 sp = ((sp + 4) & -16ul) - 4;
202#else /* !CONFIG_X86_32 */
203 sp = round_down(sp, 16) - 8;
204#endif
205 return sp;
206}
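
align_sigframe() centralizes what both word sizes need: on 32-bit, the i386 ABI wants the stack 16-byte aligned at the point of function entry, i.e. after the CPU has pushed the 4-byte return address, hence the (sp + 4) arithmetic; on 64-bit, round_down(sp, 16) - 8 achieves the analogous post-call alignment. A quick standalone check of the 32-bit identity (ordinary userspace C, purely illustrative):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long sp;

            for (sp = 0xbffff000UL; sp < 0xbffff020UL; sp++) {
                    unsigned long a = ((sp + 4) & -16UL) - 4;

                    assert(((a + 4) & 15) == 0); /* aligned on entry */
                    assert(a <= sp);             /* never grows the stack */
            }
            puts("alignment identity holds");
            return 0;
    }
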
207
218static inline void __user * 208static inline void __user *
219get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, 209get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
220 void **fpstate) 210 void __user **fpstate)
221{ 211{
222 unsigned long sp;
223
224 /* Default to using normal stack */ 212 /* Default to using normal stack */
225 sp = regs->sp; 213 unsigned long sp = regs->sp;
214
215#ifdef CONFIG_X86_64
216 /* redzone */
217 sp -= 128;
218#endif /* CONFIG_X86_64 */
226 219
227 /* 220 /*
228 * If we are on the alternate signal stack and would overflow it, don't. 221 * If we are on the alternate signal stack and would overflow it, don't.
@@ -236,30 +229,52 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
236 if (sas_ss_flags(sp) == 0) 229 if (sas_ss_flags(sp) == 0)
237 sp = current->sas_ss_sp + current->sas_ss_size; 230 sp = current->sas_ss_sp + current->sas_ss_size;
238 } else { 231 } else {
232#ifdef CONFIG_X86_32
239 /* This is the legacy signal stack switching. */ 233 /* This is the legacy signal stack switching. */
240 if ((regs->ss & 0xffff) != __USER_DS && 234 if ((regs->ss & 0xffff) != __USER_DS &&
241 !(ka->sa.sa_flags & SA_RESTORER) && 235 !(ka->sa.sa_flags & SA_RESTORER) &&
242 ka->sa.sa_restorer) 236 ka->sa.sa_restorer)
243 sp = (unsigned long) ka->sa.sa_restorer; 237 sp = (unsigned long) ka->sa.sa_restorer;
238#endif /* CONFIG_X86_32 */
244 } 239 }
245 240
246 if (used_math()) { 241 if (used_math()) {
247 sp = sp - sig_xstate_size; 242 sp -= sig_xstate_size;
248 *fpstate = (struct _fpstate *) sp; 243#ifdef CONFIG_X86_64
244 sp = round_down(sp, 64);
245#endif /* CONFIG_X86_64 */
246 *fpstate = (void __user *)sp;
247
249 if (save_i387_xstate(*fpstate) < 0) 248 if (save_i387_xstate(*fpstate) < 0)
250 return (void __user *)-1L; 249 return (void __user *)-1L;
251 } 250 }
252 251
253 sp -= frame_size; 252 return (void __user *)align_sigframe(sp - frame_size);
254 /*
255 * Align the stack pointer according to the i386 ABI,
256 * i.e. so that on function entry ((sp + 4) & 15) == 0.
257 */
258 sp = ((sp + 4) & -16ul) - 4;
259
260 return (void __user *) sp;
261} 253}
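
Note the two 64-bit additions folded into get_sigframe(): the 128-byte drop honors the x86-64 SysV ABI red zone (the area below %rsp that a leaf function may use without moving %rsp, which the kernel must not overwrite), and round_down(sp, 64) gives the saved FPU/xstate image the 64-byte alignment the XSAVE area expects. A hedged illustration of why the red zone matters (compiler-dependent, but gcc really does emit such code for leaf functions):

    /* A leaf like this may be compiled to spill 'tmp' at -4(%rsp)
     * without adjusting %rsp at all.  A signal frame written at
     * regs->sp would clobber it, hence the sp -= 128 above. */
    static int leaf(int x)
    {
            volatile int tmp = x;
            return tmp + 1;
    }
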
262 254
255#ifdef CONFIG_X86_32
256static const struct {
257 u16 poplmovl;
258 u32 val;
259 u16 int80;
260} __attribute__((packed)) retcode = {
261 0xb858, /* popl %eax; movl $..., %eax */
262 __NR_sigreturn,
263 0x80cd, /* int $0x80 */
264};
265
266static const struct {
267 u8 movl;
268 u32 val;
269 u16 int80;
270 u8 pad;
271} __attribute__((packed)) rt_retcode = {
272 0xb8, /* movl $..., %eax */
273 __NR_rt_sigreturn,
274 0x80cd, /* int $0x80 */
275 0
276};
277
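
The two packed structs are byte-exact encodings of the historical sigreturn trampolines; decoded on little-endian x86 (which is why the u16 0xb858 lands in memory as 58 b8):

    retcode:
        58                      popl %eax
        b8 <__NR_sigreturn>     movl $__NR_sigreturn, %eax
        cd 80                   int  $0x80

    rt_retcode:
        b8 <__NR_rt_sigreturn>  movl $__NR_rt_sigreturn, %eax
        cd 80                   int  $0x80
        00                      pad byte

As the comment further down notes, these bytes are no longer executed; gdb pattern-matches them to recognize signal-handler stack frames.
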
263static int 278static int
264__setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, 279__setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
265 struct pt_regs *regs) 280 struct pt_regs *regs)
@@ -336,43 +351,41 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
336 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 351 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
337 return -EFAULT; 352 return -EFAULT;
338 353
339 err |= __put_user(sig, &frame->sig); 354 put_user_try {
340 err |= __put_user(&frame->info, &frame->pinfo); 355 put_user_ex(sig, &frame->sig);
341 err |= __put_user(&frame->uc, &frame->puc); 356 put_user_ex(&frame->info, &frame->pinfo);
342 err |= copy_siginfo_to_user(&frame->info, info); 357 put_user_ex(&frame->uc, &frame->puc);
343 if (err) 358 err |= copy_siginfo_to_user(&frame->info, info);
344 return -EFAULT;
345 359
346 /* Create the ucontext. */ 360 /* Create the ucontext. */
347 if (cpu_has_xsave) 361 if (cpu_has_xsave)
348 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); 362 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
349 else 363 else
350 err |= __put_user(0, &frame->uc.uc_flags); 364 put_user_ex(0, &frame->uc.uc_flags);
351 err |= __put_user(0, &frame->uc.uc_link); 365 put_user_ex(0, &frame->uc.uc_link);
352 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 366 put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
353 err |= __put_user(sas_ss_flags(regs->sp), 367 put_user_ex(sas_ss_flags(regs->sp),
354 &frame->uc.uc_stack.ss_flags); 368 &frame->uc.uc_stack.ss_flags);
355 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); 369 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
356 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, 370 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
357 regs, set->sig[0]); 371 regs, set->sig[0]);
358 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 372 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
359 if (err) 373
360 return -EFAULT; 374 /* Set up to return from userspace. */
361 375 restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
362 /* Set up to return from userspace. */ 376 if (ka->sa.sa_flags & SA_RESTORER)
363 restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); 377 restorer = ka->sa.sa_restorer;
364 if (ka->sa.sa_flags & SA_RESTORER) 378 put_user_ex(restorer, &frame->pretcode);
365 restorer = ka->sa.sa_restorer;
366 err |= __put_user(restorer, &frame->pretcode);
367 379
368 /* 380 /*
369 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80 381 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
370 * 382 *
371 * WE DO NOT USE IT ANY MORE! It's only left here for historical 383 * WE DO NOT USE IT ANY MORE! It's only left here for historical
372 * reasons and because gdb uses it as a signature to notice 384 * reasons and because gdb uses it as a signature to notice
373 * signal handler stack frames. 385 * signal handler stack frames.
374 */ 386 */
375 err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode); 387 put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
388 } put_user_catch(err);
376 389
377 if (err) 390 if (err)
378 return -EFAULT; 391 return -EFAULT;
@@ -392,24 +405,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
392 return 0; 405 return 0;
393} 406}
394#else /* !CONFIG_X86_32 */ 407#else /* !CONFIG_X86_32 */
395/*
396 * Determine which stack to use..
397 */
398static void __user *
399get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size)
400{
401 /* Default to using normal stack - redzone*/
402 sp -= 128;
403
404 /* This is the X/Open sanctioned signal stack switching. */
405 if (ka->sa.sa_flags & SA_ONSTACK) {
406 if (sas_ss_flags(sp) == 0)
407 sp = current->sas_ss_sp + current->sas_ss_size;
408 }
409
410 return (void __user *)round_down(sp - size, 64);
411}
412
413static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 408static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
414 sigset_t *set, struct pt_regs *regs) 409 sigset_t *set, struct pt_regs *regs)
415{ 410{
@@ -418,15 +413,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
418 int err = 0; 413 int err = 0;
419 struct task_struct *me = current; 414 struct task_struct *me = current;
420 415
421 if (used_math()) { 416 frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp);
422 fp = get_stack(ka, regs->sp, sig_xstate_size);
423 frame = (void __user *)round_down(
424 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
425
426 if (save_i387_xstate(fp) < 0)
427 return -EFAULT;
428 } else
429 frame = get_stack(ka, regs->sp, sizeof(struct rt_sigframe)) - 8;
430 417
431 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 418 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
432 return -EFAULT; 419 return -EFAULT;
@@ -436,28 +423,30 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
436 return -EFAULT; 423 return -EFAULT;
437 } 424 }
438 425
439 /* Create the ucontext. */ 426 put_user_try {
440 if (cpu_has_xsave) 427 /* Create the ucontext. */
441 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); 428 if (cpu_has_xsave)
442 else 429 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
443 err |= __put_user(0, &frame->uc.uc_flags); 430 else
444 err |= __put_user(0, &frame->uc.uc_link); 431 put_user_ex(0, &frame->uc.uc_flags);
445 err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 432 put_user_ex(0, &frame->uc.uc_link);
446 err |= __put_user(sas_ss_flags(regs->sp), 433 put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
447 &frame->uc.uc_stack.ss_flags); 434 put_user_ex(sas_ss_flags(regs->sp),
448 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); 435 &frame->uc.uc_stack.ss_flags);
449 err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); 436 put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
450 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 437 err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
451 438 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
452 /* Set up to return from userspace. If provided, use a stub 439
453 already in userspace. */ 440 /* Set up to return from userspace. If provided, use a stub
454 /* x86-64 should always use SA_RESTORER. */ 441 already in userspace. */
455 if (ka->sa.sa_flags & SA_RESTORER) { 442 /* x86-64 should always use SA_RESTORER. */
456 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); 443 if (ka->sa.sa_flags & SA_RESTORER) {
457 } else { 444 put_user_ex(ka->sa.sa_restorer, &frame->pretcode);
458 /* could use a vstub here */ 445 } else {
459 return -EFAULT; 446 /* could use a vstub here */
460 } 447 err |= -EFAULT;
448 }
449 } put_user_catch(err);
461 450
462 if (err) 451 if (err)
463 return -EFAULT; 452 return -EFAULT;
@@ -509,31 +498,41 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
509 struct old_sigaction __user *oact) 498 struct old_sigaction __user *oact)
510{ 499{
511 struct k_sigaction new_ka, old_ka; 500 struct k_sigaction new_ka, old_ka;
512 int ret; 501 int ret = 0;
513 502
514 if (act) { 503 if (act) {
515 old_sigset_t mask; 504 old_sigset_t mask;
516 505
517 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 506 if (!access_ok(VERIFY_READ, act, sizeof(*act)))
518 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
519 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
520 return -EFAULT; 507 return -EFAULT;
521 508
522 __get_user(new_ka.sa.sa_flags, &act->sa_flags); 509 get_user_try {
523 __get_user(mask, &act->sa_mask); 510 get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
511 get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
512 get_user_ex(mask, &act->sa_mask);
513 get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
514 } get_user_catch(ret);
515
516 if (ret)
517 return -EFAULT;
524 siginitset(&new_ka.sa.sa_mask, mask); 518 siginitset(&new_ka.sa.sa_mask, mask);
525 } 519 }
526 520
527 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 521 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
528 522
529 if (!ret && oact) { 523 if (!ret && oact) {
530 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 524 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
531 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
532 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
533 return -EFAULT; 525 return -EFAULT;
534 526
535 __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 527 put_user_try {
536 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); 528 put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
529 put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
530 put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
531 put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
532 } put_user_catch(ret);
533
534 if (ret)
535 return -EFAULT;
537 } 536 }
538 537
539 return ret; 538 return ret;
@@ -541,14 +540,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
541#endif /* CONFIG_X86_32 */ 540#endif /* CONFIG_X86_32 */
542 541
543#ifdef CONFIG_X86_32 542#ifdef CONFIG_X86_32
544asmlinkage int sys_sigaltstack(unsigned long bx) 543int sys_sigaltstack(struct pt_regs *regs)
545{ 544{
546 /* 545 const stack_t __user *uss = (const stack_t __user *)regs->bx;
547 * This is needed to make gcc realize it doesn't own the
548 * "struct pt_regs"
549 */
550 struct pt_regs *regs = (struct pt_regs *)&bx;
551 const stack_t __user *uss = (const stack_t __user *)bx;
552 stack_t __user *uoss = (stack_t __user *)regs->cx; 546 stack_t __user *uoss = (stack_t __user *)regs->cx;
553 547
554 return do_sigaltstack(uss, uoss, regs->sp); 548 return do_sigaltstack(uss, uoss, regs->sp);
@@ -566,14 +560,12 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
566 * Do a signal return; undo the signal stack. 560 * Do a signal return; undo the signal stack.
567 */ 561 */
568#ifdef CONFIG_X86_32 562#ifdef CONFIG_X86_32
569asmlinkage unsigned long sys_sigreturn(unsigned long __unused) 563unsigned long sys_sigreturn(struct pt_regs *regs)
570{ 564{
571 struct sigframe __user *frame; 565 struct sigframe __user *frame;
572 struct pt_regs *regs;
573 unsigned long ax; 566 unsigned long ax;
574 sigset_t set; 567 sigset_t set;
575 568
576 regs = (struct pt_regs *) &__unused;
577 frame = (struct sigframe __user *)(regs->sp - 8); 569 frame = (struct sigframe __user *)(regs->sp - 8);
578 570
579 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 571 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -600,7 +592,7 @@ badframe:
600} 592}
601#endif /* CONFIG_X86_32 */ 593#endif /* CONFIG_X86_32 */
602 594
603static long do_rt_sigreturn(struct pt_regs *regs) 595long sys_rt_sigreturn(struct pt_regs *regs)
604{ 596{
605 struct rt_sigframe __user *frame; 597 struct rt_sigframe __user *frame;
606 unsigned long ax; 598 unsigned long ax;
@@ -631,25 +623,6 @@ badframe:
631 return 0; 623 return 0;
632} 624}
633 625
634#ifdef CONFIG_X86_32
635/*
636 * Note: do not pass in pt_regs directly as with tail-call optimization
637 * GCC will incorrectly stomp on the caller's frame and corrupt user-space
638 * register state:
639 */
640asmlinkage int sys_rt_sigreturn(unsigned long __unused)
641{
642 struct pt_regs *regs = (struct pt_regs *)&__unused;
643
644 return do_rt_sigreturn(regs);
645}
646#else /* !CONFIG_X86_32 */
647asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
648{
649 return do_rt_sigreturn(regs);
650}
651#endif /* CONFIG_X86_32 */
652
653/* 626/*
654 * OK, we're invoking a handler: 627 * OK, we're invoking a handler:
655 */ 628 */
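
The deletions above all retire the same fragile idiom: recovering struct pt_regs * by taking the address of the first stacked argument, which only works if gcc leaves the incoming frame untouched (the removed comment spells out how tail-call optimization breaks it). These syscalls now take an explicit struct pt_regs *, supplied by small ptregs_* stubs (see the syscall_table_32.S hunk below). The shape of the change, sketched with hypothetical function names:

    /* Old, fragile: pt_regs recovered from the argument's stack slot.
     * Valid only while the compiler keeps the entry frame intact. */
    asmlinkage int old_style(unsigned long bx)
    {
            struct pt_regs *regs = (struct pt_regs *)&bx;

            return (int)regs->cx;
    }

    /* New: an entry_32.S stub passes the frame address explicitly,
     * so C code receives an honest pointer the optimizer can't break. */
    int new_style(struct pt_regs *regs)
    {
            return (int)regs->cx;
    }
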
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index e6faa3316bd..13f33ea8cca 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -2,7 +2,7 @@
2 * Intel SMP support routines. 2 * Intel SMP support routines.
3 * 3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> 4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
5 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> 5 * (c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
6 * (c) 2002,2003 Andi Kleen, SuSE Labs. 6 * (c) 2002,2003 Andi Kleen, SuSE Labs.
7 * 7 *
8 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com> 8 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
@@ -26,8 +26,7 @@
26#include <asm/tlbflush.h> 26#include <asm/tlbflush.h>
27#include <asm/mmu_context.h> 27#include <asm/mmu_context.h>
28#include <asm/proto.h> 28#include <asm/proto.h>
29#include <mach_ipi.h> 29#include <asm/apic.h>
30#include <mach_apic.h>
31/* 30/*
32 * Some notes on x86 processor bugs affecting SMP operation: 31 * Some notes on x86 processor bugs affecting SMP operation:
33 * 32 *
@@ -118,12 +117,12 @@ static void native_smp_send_reschedule(int cpu)
118 WARN_ON(1); 117 WARN_ON(1);
119 return; 118 return;
120 } 119 }
121 send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); 120 apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
122} 121}
123 122
124void native_send_call_func_single_ipi(int cpu) 123void native_send_call_func_single_ipi(int cpu)
125{ 124{
126 send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); 125 apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
127} 126}
128 127
129void native_send_call_func_ipi(const struct cpumask *mask) 128void native_send_call_func_ipi(const struct cpumask *mask)
@@ -131,7 +130,7 @@ void native_send_call_func_ipi(const struct cpumask *mask)
131 cpumask_var_t allbutself; 130 cpumask_var_t allbutself;
132 131
133 if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) { 132 if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
134 send_IPI_mask(mask, CALL_FUNCTION_VECTOR); 133 apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
135 return; 134 return;
136 } 135 }
137 136
@@ -140,9 +139,9 @@ void native_send_call_func_ipi(const struct cpumask *mask)
140 139
141 if (cpumask_equal(mask, allbutself) && 140 if (cpumask_equal(mask, allbutself) &&
142 cpumask_equal(cpu_online_mask, cpu_callout_mask)) 141 cpumask_equal(cpu_online_mask, cpu_callout_mask))
143 send_IPI_allbutself(CALL_FUNCTION_VECTOR); 142 apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
144 else 143 else
145 send_IPI_mask(mask, CALL_FUNCTION_VECTOR); 144 apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
146 145
147 free_cpumask_var(allbutself); 146 free_cpumask_var(allbutself);
148} 147}
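
The smp.c conversion is mechanical but telling: the subarch-specific send_IPI_* functions that used to come from <mach_ipi.h> at compile time become methods on the global apic driver object, selected at boot. A reduced sketch of the pattern (the struct and variable names here are illustrative, not the kernel's full struct apic):

    struct apic_sketch {
            const char *name;
            void (*send_IPI_mask)(const struct cpumask *mask, int vector);
            void (*send_IPI_allbutself)(int vector);
    };

    static struct apic_sketch *apic_drv;    /* set once during APIC probe */

    static void sketch_send_reschedule(int cpu)
    {
            /* indirect call replaces the old per-subarch macro */
            apic_drv->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
    }
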
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index bb1a3b1fc87..ef7d10170c3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -2,7 +2,7 @@
2 * x86 SMP booting functions 2 * x86 SMP booting functions
3 * 3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> 4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
5 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> 5 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
6 * Copyright 2001 Andi Kleen, SuSE Labs. 6 * Copyright 2001 Andi Kleen, SuSE Labs.
7 * 7 *
8 * Much of the core SMP work is based on previous work by Thomas Radke, to 8 * Much of the core SMP work is based on previous work by Thomas Radke, to
@@ -53,7 +53,6 @@
53#include <asm/nmi.h> 53#include <asm/nmi.h>
54#include <asm/irq.h> 54#include <asm/irq.h>
55#include <asm/idle.h> 55#include <asm/idle.h>
56#include <asm/smp.h>
57#include <asm/trampoline.h> 56#include <asm/trampoline.h>
58#include <asm/cpu.h> 57#include <asm/cpu.h>
59#include <asm/numa.h> 58#include <asm/numa.h>
@@ -61,13 +60,12 @@
61#include <asm/tlbflush.h> 60#include <asm/tlbflush.h>
62#include <asm/mtrr.h> 61#include <asm/mtrr.h>
63#include <asm/vmi.h> 62#include <asm/vmi.h>
64#include <asm/genapic.h> 63#include <asm/apic.h>
65#include <asm/setup.h> 64#include <asm/setup.h>
65#include <asm/uv/uv.h>
66#include <linux/mc146818rtc.h> 66#include <linux/mc146818rtc.h>
67 67
68#include <mach_apic.h> 68#include <asm/smpboot_hooks.h>
69#include <mach_wakecpu.h>
70#include <smpboot_hooks.h>
71 69
72#ifdef CONFIG_X86_32 70#ifdef CONFIG_X86_32
73u8 apicid_2_node[MAX_APICID]; 71u8 apicid_2_node[MAX_APICID];
@@ -114,11 +112,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
114DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); 112DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
115EXPORT_PER_CPU_SYMBOL(cpu_info); 113EXPORT_PER_CPU_SYMBOL(cpu_info);
116 114
117static atomic_t init_deasserted; 115atomic_t init_deasserted;
118
119
120/* Set if we find a B stepping CPU */
121static int __cpuinitdata smp_b_stepping;
122 116
123#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) 117#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
124 118
@@ -163,7 +157,7 @@ static void map_cpu_to_logical_apicid(void)
163{ 157{
164 int cpu = smp_processor_id(); 158 int cpu = smp_processor_id();
165 int apicid = logical_smp_processor_id(); 159 int apicid = logical_smp_processor_id();
166 int node = apicid_to_node(apicid); 160 int node = apic->apicid_to_node(apicid);
167 161
168 if (!node_online(node)) 162 if (!node_online(node))
169 node = first_online_node; 163 node = first_online_node;
@@ -196,7 +190,8 @@ static void __cpuinit smp_callin(void)
196 * our local APIC. We have to wait for the IPI or we'll 190 * our local APIC. We have to wait for the IPI or we'll
197 * lock up on an APIC access. 191 * lock up on an APIC access.
198 */ 192 */
199 wait_for_init_deassert(&init_deasserted); 193 if (apic->wait_for_init_deassert)
194 apic->wait_for_init_deassert(&init_deasserted);
200 195
201 /* 196 /*
202 * (This works even if the APIC is not enabled.) 197 * (This works even if the APIC is not enabled.)
@@ -243,7 +238,8 @@ static void __cpuinit smp_callin(void)
243 */ 238 */
244 239
245 pr_debug("CALLIN, before setup_local_APIC().\n"); 240 pr_debug("CALLIN, before setup_local_APIC().\n");
246 smp_callin_clear_local_apic(); 241 if (apic->smp_callin_clear_local_apic)
242 apic->smp_callin_clear_local_apic();
247 setup_local_APIC(); 243 setup_local_APIC();
248 end_local_APIC_setup(); 244 end_local_APIC_setup();
249 map_cpu_to_logical_apicid(); 245 map_cpu_to_logical_apicid();
@@ -271,8 +267,6 @@ static void __cpuinit smp_callin(void)
271 cpumask_set_cpu(cpuid, cpu_callin_mask); 267 cpumask_set_cpu(cpuid, cpu_callin_mask);
272} 268}
273 269
274static int __cpuinitdata unsafe_smp;
275
276/* 270/*
277 * Activate a secondary processor. 271 * Activate a secondary processor.
278 */ 272 */
@@ -340,76 +334,6 @@ notrace static void __cpuinit start_secondary(void *unused)
340 cpu_idle(); 334 cpu_idle();
341} 335}
342 336
343static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
344{
345 /*
346 * Mask B, Pentium, but not Pentium MMX
347 */
348 if (c->x86_vendor == X86_VENDOR_INTEL &&
349 c->x86 == 5 &&
350 c->x86_mask >= 1 && c->x86_mask <= 4 &&
351 c->x86_model <= 3)
352 /*
353 * Remember we have B step Pentia with bugs
354 */
355 smp_b_stepping = 1;
356
357 /*
358 * Certain Athlons might work (for various values of 'work') in SMP
359 * but they are not certified as MP capable.
360 */
361 if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
362
363 if (num_possible_cpus() == 1)
364 goto valid_k7;
365
366 /* Athlon 660/661 is valid. */
367 if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
368 (c->x86_mask == 1)))
369 goto valid_k7;
370
371 /* Duron 670 is valid */
372 if ((c->x86_model == 7) && (c->x86_mask == 0))
373 goto valid_k7;
374
375 /*
376 * Athlon 662, Duron 671, and Athlon >model 7 have capability
377 * bit. It's worth noting that the A5 stepping (662) of some
378 * Athlon XP's have the MP bit set.
379 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
380 * more.
381 */
382 if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
383 ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
384 (c->x86_model > 7))
385 if (cpu_has_mp)
386 goto valid_k7;
387
388 /* If we get here, not a certified SMP capable AMD system. */
389 unsafe_smp = 1;
390 }
391
392valid_k7:
393 ;
394}
395
396static void __cpuinit smp_checks(void)
397{
398 if (smp_b_stepping)
399 printk(KERN_WARNING "WARNING: SMP operation may be unreliable "
400 "with B stepping processors.\n");
401
402 /*
403 * Don't taint if we are running SMP kernel on a single non-MP
404 * approved Athlon
405 */
406 if (unsafe_smp && num_online_cpus() > 1) {
407 printk(KERN_INFO "WARNING: This combination of AMD "
408 "processors is not suitable for SMP.\n");
409 add_taint(TAINT_UNSAFE_SMP);
410 }
411}
412
413/* 337/*
414 * The bootstrap kernel entry code has set these up. Save them for 338 * The bootstrap kernel entry code has set these up. Save them for
415 * a given CPU 339 * a given CPU
@@ -423,7 +347,6 @@ void __cpuinit smp_store_cpu_info(int id)
423 c->cpu_index = id; 347 c->cpu_index = id;
424 if (id != 0) 348 if (id != 0)
425 identify_secondary_cpu(c); 349 identify_secondary_cpu(c);
426 smp_apply_quirks(c);
427} 350}
428 351
429 352
@@ -583,7 +506,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
583 /* Target chip */ 506 /* Target chip */
584 /* Boot on the stack */ 507 /* Boot on the stack */
585 /* Kick the second */ 508 /* Kick the second */
586 apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid); 509 apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);
587 510
588 pr_debug("Waiting for send to finish...\n"); 511 pr_debug("Waiting for send to finish...\n");
589 send_status = safe_apic_wait_icr_idle(); 512 send_status = safe_apic_wait_icr_idle();
@@ -614,12 +537,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
614 unsigned long send_status, accept_status = 0; 537 unsigned long send_status, accept_status = 0;
615 int maxlvt, num_starts, j; 538 int maxlvt, num_starts, j;
616 539
617 if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
618 send_status = uv_wakeup_secondary(phys_apicid, start_eip);
619 atomic_set(&init_deasserted, 1);
620 return send_status;
621 }
622
623 maxlvt = lapic_get_maxlvt(); 540 maxlvt = lapic_get_maxlvt();
624 541
625 /* 542 /*
@@ -745,78 +662,23 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
745 complete(&c_idle->done); 662 complete(&c_idle->done);
746} 663}
747 664
748#ifdef CONFIG_X86_64
749
750/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
751static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
752{
753 if (!after_bootmem)
754 free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
755}
756
757/*
758 * Allocate node local memory for the AP pda.
759 *
760 * Must be called after the _cpu_pda pointer table is initialized.
761 */
762int __cpuinit get_local_pda(int cpu)
763{
764 struct x8664_pda *oldpda, *newpda;
765 unsigned long size = sizeof(struct x8664_pda);
766 int node = cpu_to_node(cpu);
767
768 if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem)
769 return 0;
770
771 oldpda = cpu_pda(cpu);
772 newpda = kmalloc_node(size, GFP_ATOMIC, node);
773 if (!newpda) {
774 printk(KERN_ERR "Could not allocate node local PDA "
775 "for CPU %d on node %d\n", cpu, node);
776
777 if (oldpda)
778 return 0; /* have a usable pda */
779 else
780 return -1;
781 }
782
783 if (oldpda) {
784 memcpy(newpda, oldpda, size);
785 free_bootmem_pda(oldpda);
786 }
787
788 newpda->in_bootmem = 0;
789 cpu_pda(cpu) = newpda;
790 return 0;
791}
792#endif /* CONFIG_X86_64 */
793
794static int __cpuinit do_boot_cpu(int apicid, int cpu)
795/* 665/*
796 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad 666 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
797 * (ie clustered apic addressing mode), this is a LOGICAL apic ID. 667 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
798 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. 668 * Returns zero if CPU booted OK, else error code from
669 * ->wakeup_secondary_cpu.
799 */ 670 */
671static int __cpuinit do_boot_cpu(int apicid, int cpu)
800{ 672{
801 unsigned long boot_error = 0; 673 unsigned long boot_error = 0;
802 int timeout;
803 unsigned long start_ip; 674 unsigned long start_ip;
804 unsigned short nmi_high = 0, nmi_low = 0; 675 int timeout;
805 struct create_idle c_idle = { 676 struct create_idle c_idle = {
806 .cpu = cpu, 677 .cpu = cpu,
807 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), 678 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
808 }; 679 };
809 INIT_WORK(&c_idle.work, do_fork_idle);
810 680
811#ifdef CONFIG_X86_64 681 INIT_WORK(&c_idle.work, do_fork_idle);
812 /* Allocate node local memory for AP pdas */
813 if (cpu > 0) {
814 boot_error = get_local_pda(cpu);
815 if (boot_error)
816 goto restore_state;
817 /* if can't get pda memory, can't start cpu */
818 }
819#endif
820 682
821 alternatives_smp_switch(1); 683 alternatives_smp_switch(1);
822 684
@@ -847,14 +709,16 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
847 709
848 set_idle_for_cpu(cpu, c_idle.idle); 710 set_idle_for_cpu(cpu, c_idle.idle);
849do_rest: 711do_rest:
850#ifdef CONFIG_X86_32
851 per_cpu(current_task, cpu) = c_idle.idle; 712 per_cpu(current_task, cpu) = c_idle.idle;
852 init_gdt(cpu); 713#ifdef CONFIG_X86_32
853 /* Stack for startup_32 can be just as for start_secondary onwards */ 714 /* Stack for startup_32 can be just as for start_secondary onwards */
854 irq_ctx_init(cpu); 715 irq_ctx_init(cpu);
855#else 716#else
856 cpu_pda(cpu)->pcurrent = c_idle.idle;
857 clear_tsk_thread_flag(c_idle.idle, TIF_FORK); 717 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
718 initial_gs = per_cpu_offset(cpu);
719 per_cpu(kernel_stack, cpu) =
720 (unsigned long)task_stack_page(c_idle.idle) -
721 KERNEL_STACK_OFFSET + THREAD_SIZE;
858#endif 722#endif
859 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); 723 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
860 initial_code = (unsigned long)start_secondary; 724 initial_code = (unsigned long)start_secondary;
@@ -878,8 +742,6 @@ do_rest:
878 742
879 pr_debug("Setting warm reset code and vector.\n"); 743 pr_debug("Setting warm reset code and vector.\n");
880 744
881 store_NMI_vector(&nmi_high, &nmi_low);
882
883 smpboot_setup_warm_reset_vector(start_ip); 745 smpboot_setup_warm_reset_vector(start_ip);
884 /* 746 /*
885 * Be paranoid about clearing APIC errors. 747 * Be paranoid about clearing APIC errors.
@@ -891,9 +753,13 @@ do_rest:
891 } 753 }
892 754
893 /* 755 /*
894 * Starting actual IPI sequence... 756 * Kick the secondary CPU. Use the method in the APIC driver
757 * if it's defined - or use an INIT boot APIC message otherwise:
895 */ 758 */
896 boot_error = wakeup_secondary_cpu(apicid, start_ip); 759 if (apic->wakeup_secondary_cpu)
760 boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
761 else
762 boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
897 763
898 if (!boot_error) { 764 if (!boot_error) {
899 /* 765 /*
@@ -927,13 +793,11 @@ do_rest:
927 else 793 else
928 /* trampoline code not run */ 794 /* trampoline code not run */
929 printk(KERN_ERR "Not responding.\n"); 795 printk(KERN_ERR "Not responding.\n");
930 if (get_uv_system_type() != UV_NON_UNIQUE_APIC) 796 if (apic->inquire_remote_apic)
931 inquire_remote_apic(apicid); 797 apic->inquire_remote_apic(apicid);
932 } 798 }
933 } 799 }
934#ifdef CONFIG_X86_64 800
935restore_state:
936#endif
937 if (boot_error) { 801 if (boot_error) {
938 /* Try to put things back the way they were before ... */ 802 /* Try to put things back the way they were before ... */
939 numa_remove_cpu(cpu); /* was set by numa_add_cpu */ 803 numa_remove_cpu(cpu); /* was set by numa_add_cpu */
@@ -961,7 +825,7 @@ restore_state:
961 825
962int __cpuinit native_cpu_up(unsigned int cpu) 826int __cpuinit native_cpu_up(unsigned int cpu)
963{ 827{
964 int apicid = cpu_present_to_apicid(cpu); 828 int apicid = apic->cpu_present_to_apicid(cpu);
965 unsigned long flags; 829 unsigned long flags;
966 int err; 830 int err;
967 831
@@ -1054,14 +918,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
1054{ 918{
1055 preempt_disable(); 919 preempt_disable();
1056 920
1057#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32) 921#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
1058 if (def_to_bigsmp && nr_cpu_ids > 8) { 922 if (def_to_bigsmp && nr_cpu_ids > 8) {
1059 unsigned int cpu; 923 unsigned int cpu;
1060 unsigned nr; 924 unsigned nr;
1061 925
1062 printk(KERN_WARNING 926 printk(KERN_WARNING
1063 "More than 8 CPUs detected - skipping them.\n" 927 "More than 8 CPUs detected - skipping them.\n"
1064 "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n"); 928 "Use CONFIG_X86_BIGSMP.\n");
1065 929
1066 nr = 0; 930 nr = 0;
1067 for_each_present_cpu(cpu) { 931 for_each_present_cpu(cpu) {
@@ -1107,7 +971,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
1107 * Should not be necessary because the MP table should list the boot 971 * Should not be necessary because the MP table should list the boot
1108 * CPU too, but we do it for the sake of robustness anyway. 972 * CPU too, but we do it for the sake of robustness anyway.
1109 */ 973 */
1110 if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { 974 if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
1111 printk(KERN_NOTICE 975 printk(KERN_NOTICE
1112 "weird, boot CPU (#%d) not listed by the BIOS.\n", 976 "weird, boot CPU (#%d) not listed by the BIOS.\n",
1113 boot_cpu_physical_apicid); 977 boot_cpu_physical_apicid);
@@ -1125,6 +989,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
1125 printk(KERN_ERR "... forcing use of dummy APIC emulation." 989 printk(KERN_ERR "... forcing use of dummy APIC emulation."
1126 "(tell your hw vendor)\n"); 990 "(tell your hw vendor)\n");
1127 smpboot_clear_io_apic(); 991 smpboot_clear_io_apic();
992 arch_disable_smp_support();
1128 return -1; 993 return -1;
1129 } 994 }
1130 995
@@ -1181,9 +1046,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1181 current_thread_info()->cpu = 0; /* needed? */ 1046 current_thread_info()->cpu = 0; /* needed? */
1182 set_cpu_sibling_map(0); 1047 set_cpu_sibling_map(0);
1183 1048
1184#ifdef CONFIG_X86_64
1185 enable_IR_x2apic(); 1049 enable_IR_x2apic();
1186 setup_apic_routing(); 1050#ifdef CONFIG_X86_64
1051 default_setup_apic_routing();
1187#endif 1052#endif
1188 1053
1189 if (smp_sanity_check(max_cpus) < 0) { 1054 if (smp_sanity_check(max_cpus) < 0) {
@@ -1207,18 +1072,18 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1207 */ 1072 */
1208 setup_local_APIC(); 1073 setup_local_APIC();
1209 1074
1210#ifdef CONFIG_X86_64
1211 /* 1075 /*
1212 * Enable IO APIC before setting up error vector 1076 * Enable IO APIC before setting up error vector
1213 */ 1077 */
1214 if (!skip_ioapic_setup && nr_ioapics) 1078 if (!skip_ioapic_setup && nr_ioapics)
1215 enable_IO_APIC(); 1079 enable_IO_APIC();
1216#endif 1080
1217 end_local_APIC_setup(); 1081 end_local_APIC_setup();
1218 1082
1219 map_cpu_to_logical_apicid(); 1083 map_cpu_to_logical_apicid();
1220 1084
1221 setup_portio_remap(); 1085 if (apic->setup_portio_remap)
1086 apic->setup_portio_remap();
1222 1087
1223 smpboot_setup_io_apic(); 1088 smpboot_setup_io_apic();
1224 /* 1089 /*
@@ -1240,10 +1105,7 @@ out:
1240void __init native_smp_prepare_boot_cpu(void) 1105void __init native_smp_prepare_boot_cpu(void)
1241{ 1106{
1242 int me = smp_processor_id(); 1107 int me = smp_processor_id();
1243#ifdef CONFIG_X86_32 1108 switch_to_new_gdt(me);
1244 init_gdt(me);
1245#endif
1246 switch_to_new_gdt();
1247 /* already set me in cpu_online_mask in boot_cpu_init() */ 1109 /* already set me in cpu_online_mask in boot_cpu_init() */
1248 cpumask_set_cpu(me, cpu_callout_mask); 1110 cpumask_set_cpu(me, cpu_callout_mask);
1249 per_cpu(cpu_state, me) = CPU_ONLINE; 1111 per_cpu(cpu_state, me) = CPU_ONLINE;
@@ -1254,7 +1116,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
1254 pr_debug("Boot done.\n"); 1116 pr_debug("Boot done.\n");
1255 1117
1256 impress_friends(); 1118 impress_friends();
1257 smp_checks();
1258#ifdef CONFIG_X86_IO_APIC 1119#ifdef CONFIG_X86_IO_APIC
1259 setup_ioapic_dest(); 1120 setup_ioapic_dest();
1260#endif 1121#endif
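
smpboot.c uses the same driver object but treats several hooks as optional: wait_for_init_deassert, smp_callin_clear_local_apic, wakeup_secondary_cpu, inquire_remote_apic and setup_portio_remap are all NULL-checked before use. That is what lets the UV special case vanish from wakeup_secondary_cpu_via_init() — UV now supplies its own wakeup_secondary_cpu method instead of being tested for inline. The dispatch, sketched with an illustrative struct:

    struct wakeup_hooks {                          /* illustrative subset */
            unsigned long (*wakeup_secondary_cpu)(int apicid,
                                                  unsigned long start_eip);
    };

    extern struct wakeup_hooks *hooks;             /* hypothetical instance */
    extern unsigned long
    wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);

    static unsigned long kick_cpu(int apicid, unsigned long start_eip)
    {
            /* platforms with no quirk leave the pointer NULL and the
             * generic INIT/SIPI sequence runs instead */
            if (hooks->wakeup_secondary_cpu)
                    return hooks->wakeup_secondary_cpu(apicid, start_eip);

            return wakeup_secondary_cpu_via_init(apicid, start_eip);
    }
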
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
deleted file mode 100644
index 397e309839d..00000000000
--- a/arch/x86/kernel/smpcommon.c
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * SMP stuff which is common to all sub-architectures.
3 */
4#include <linux/module.h>
5#include <asm/smp.h>
6
7#ifdef CONFIG_X86_32
8DEFINE_PER_CPU(unsigned long, this_cpu_off);
9EXPORT_PER_CPU_SYMBOL(this_cpu_off);
10
11/*
12 * Initialize the CPU's GDT. This is either the boot CPU doing itself
13 * (still using the master per-cpu area), or a CPU doing it for a
14 * secondary which will soon come up.
15 */
16__cpuinit void init_gdt(int cpu)
17{
18 struct desc_struct gdt;
19
20 pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF,
21 0x2 | DESCTYPE_S, 0x8);
22 gdt.s = 1;
23
24 write_gdt_entry(get_cpu_gdt_table(cpu),
25 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
26
27 per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
28 per_cpu(cpu_number, cpu) = cpu;
29}
30#endif
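
The deleted init_gdt() built a writable data-segment descriptor whose base is the CPU's per-cpu offset, so 32-bit %fs-relative loads land in that CPU's per-cpu area; its callers disappear because the common switch_to_new_gdt() path now covers both word sizes. For reference, a hedged sketch of how such a descriptor packs (standard x86 descriptor layout; the kernel itself uses pack_descriptor()/write_gdt_entry() as shown above):

    /* Pack base/limit into an 8-byte x86 segment descriptor image.
     * limit is in 4 KiB units (G=1), matching the 0xFFFFF above;
     * access byte 0x92 = present, DPL 0, writable data segment. */
    static unsigned long long pack_seg(unsigned int base, unsigned int limit)
    {
            unsigned long long d = 0;

            d |= limit & 0xffffULL;                            /* limit 15:0 */
            d |= (unsigned long long)(base & 0xffffff) << 16;  /* base 23:0  */
            d |= 0xc092ULL << 40;                   /* access + G/D flags    */
            d |= (unsigned long long)((limit >> 16) & 0xf) << 48;
            d |= (unsigned long long)(base >> 24) << 56;       /* base 31:24 */
            return d;
    }
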
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 10786af9554..f7bddc2e37d 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Stack trace management functions 2 * Stack trace management functions
3 * 3 *
4 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 4 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 */ 5 */
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/stacktrace.h> 7#include <linux/stacktrace.h>
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
deleted file mode 100644
index 7b987852e87..00000000000
--- a/arch/x86/kernel/summit_32.c
+++ /dev/null
@@ -1,188 +0,0 @@
1/*
2 * IBM Summit-Specific Code
3 *
4 * Written By: Matthew Dobson, IBM Corporation
5 *
6 * Copyright (c) 2003 IBM Corp.
7 *
8 * All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 * Send feedback to <colpatch@us.ibm.com>
26 *
27 */
28
29#include <linux/mm.h>
30#include <linux/init.h>
31#include <asm/io.h>
32#include <asm/bios_ebda.h>
33#include <asm/summit/mpparse.h>
34
35static struct rio_table_hdr *rio_table_hdr __initdata;
36static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
37static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata;
38
39#ifndef CONFIG_X86_NUMAQ
40static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata;
41#endif
42
43static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
44{
45 int twister = 0, node = 0;
46 int i, bus, num_buses;
47
48 for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
49 if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) {
50 twister = rio_devs[i]->owner_id;
51 break;
52 }
53 }
54 if (i == rio_table_hdr->num_rio_dev) {
55 printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__);
56 return last_bus;
57 }
58
59 for (i = 0; i < rio_table_hdr->num_scal_dev; i++) {
60 if (scal_devs[i]->node_id == twister) {
61 node = scal_devs[i]->node_id;
62 break;
63 }
64 }
65 if (i == rio_table_hdr->num_scal_dev) {
66 printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__);
67 return last_bus;
68 }
69
70 switch (rio_devs[wpeg_num]->type) {
71 case CompatWPEG:
72 /*
73 * The Compatibility Winnipeg controls the 2 legacy buses,
74 * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case
75 * a PCI-PCI bridge card is used in either slot: total 5 buses.
76 */
77 num_buses = 5;
78 break;
79 case AltWPEG:
80 /*
81 * The Alternate Winnipeg controls the 2 133MHz buses [1 slot
82 * each], their 2 "extra" buses, the 100MHz bus [2 slots] and
83 * the "extra" buses for each of those slots: total 7 buses.
84 */
85 num_buses = 7;
86 break;
87 case LookOutAWPEG:
88 case LookOutBWPEG:
89 /*
90 * A Lookout Winnipeg controls 3 100MHz buses [2 slots each]
91 * & the "extra" buses for each of those slots: total 9 buses.
92 */
93 num_buses = 9;
94 break;
95 default:
96 printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__);
97 return last_bus;
98 }
99
100 for (bus = last_bus; bus < last_bus + num_buses; bus++)
101 mp_bus_id_to_node[bus] = node;
102 return bus;
103}
104
105static int __init build_detail_arrays(void)
106{
107 unsigned long ptr;
108 int i, scal_detail_size, rio_detail_size;
109
110 if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) {
111 printk(KERN_WARNING "%s: MAX_NUMNODES too low! Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev);
112 return 0;
113 }
114
115 switch (rio_table_hdr->version) {
116 default:
117 printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version);
118 return 0;
119 case 2:
120 scal_detail_size = 11;
121 rio_detail_size = 13;
122 break;
123 case 3:
124 scal_detail_size = 12;
125 rio_detail_size = 15;
126 break;
127 }
128
129 ptr = (unsigned long)rio_table_hdr + 3;
130 for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size)
131 scal_devs[i] = (struct scal_detail *)ptr;
132
133 for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size)
134 rio_devs[i] = (struct rio_detail *)ptr;
135
136 return 1;
137}
138
139void __init setup_summit(void)
140{
141 unsigned long ptr;
142 unsigned short offset;
143 int i, next_wpeg, next_bus = 0;
144
145 /* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */
146 ptr = get_bios_ebda();
147 ptr = (unsigned long)phys_to_virt(ptr);
148
149 rio_table_hdr = NULL;
150 offset = 0x180;
151 while (offset) {
152 /* The block id is stored in the 2nd word */
153 if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) {
154 /* set the pointer past the offset & block id */
155 rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
156 break;
157 }
158 /* The next offset is stored in the 1st word. 0 means no more */
159 offset = *((unsigned short *)(ptr + offset));
160 }
161 if (!rio_table_hdr) {
162 printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__);
163 return;
164 }
165
166 if (!build_detail_arrays())
167 return;
168
169 /* The first Winnipeg we're looking for has an index of 0 */
170 next_wpeg = 0;
171 do {
172 for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
173 if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) {
174 /* It's the Winnipeg we're looking for! */
175 next_bus = setup_pci_node_map_for_wpeg(i, next_bus);
176 next_wpeg++;
177 break;
178 }
179 }
180 /*
181 * If we go through all Rio devices and don't find one with
182 * the next index, it means we've found all the Winnipegs,
183 * and thus all the PCI buses.
184 */
185 if (i == rio_table_hdr->num_rio_dev)
186 next_wpeg = 0;
187 } while (next_wpeg != 0);
188}
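
Before this code moved under arch/x86/kernel/apic/, setup_summit() located IBM's Rio Grande table by walking a chain of EBDA blocks: word 0 of each block holds the offset of the next block (0 terminates), word 1 holds the block id (0x4752 for the table). The same scan as a standalone sketch (a hypothetical userspace harness over a buffer, purely illustrative):

    #include <stdint.h>
    #include <stddef.h>

    /* Find a block by id in an EBDA-style chain laid out as
     * [next_off][id][payload...]; returns the payload offset,
     * or 0 when the id is not present. */
    static size_t find_block(const uint8_t *ebda, uint16_t want_id)
    {
            uint16_t off = 0x180;               /* first block, as above */

            while (off) {
                    uint16_t id;

                    id = (uint16_t)(ebda[off + 2] | (ebda[off + 3] << 8));
                    if (id == want_id)
                            return off + 4;     /* skip offset + id words */
                    off = (uint16_t)(ebda[off] | (ebda[off + 1] << 8));
            }
            return 0;
    }
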
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index e2e86a08f31..3bdb64829b8 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -1,7 +1,7 @@
1ENTRY(sys_call_table) 1ENTRY(sys_call_table)
2 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ 2 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
3 .long sys_exit 3 .long sys_exit
4 .long sys_fork 4 .long ptregs_fork
5 .long sys_read 5 .long sys_read
6 .long sys_write 6 .long sys_write
7 .long sys_open /* 5 */ 7 .long sys_open /* 5 */
@@ -10,7 +10,7 @@ ENTRY(sys_call_table)
10 .long sys_creat 10 .long sys_creat
11 .long sys_link 11 .long sys_link
12 .long sys_unlink /* 10 */ 12 .long sys_unlink /* 10 */
13 .long sys_execve 13 .long ptregs_execve
14 .long sys_chdir 14 .long sys_chdir
15 .long sys_time 15 .long sys_time
16 .long sys_mknod 16 .long sys_mknod
@@ -109,17 +109,17 @@ ENTRY(sys_call_table)
109 .long sys_newlstat 109 .long sys_newlstat
110 .long sys_newfstat 110 .long sys_newfstat
111 .long sys_uname 111 .long sys_uname
112 .long sys_iopl /* 110 */ 112 .long ptregs_iopl /* 110 */
113 .long sys_vhangup 113 .long sys_vhangup
114 .long sys_ni_syscall /* old "idle" system call */ 114 .long sys_ni_syscall /* old "idle" system call */
115 .long sys_vm86old 115 .long ptregs_vm86old
116 .long sys_wait4 116 .long sys_wait4
117 .long sys_swapoff /* 115 */ 117 .long sys_swapoff /* 115 */
118 .long sys_sysinfo 118 .long sys_sysinfo
119 .long sys_ipc 119 .long sys_ipc
120 .long sys_fsync 120 .long sys_fsync
121 .long sys_sigreturn 121 .long ptregs_sigreturn
122 .long sys_clone /* 120 */ 122 .long ptregs_clone /* 120 */
123 .long sys_setdomainname 123 .long sys_setdomainname
124 .long sys_newuname 124 .long sys_newuname
125 .long sys_modify_ldt 125 .long sys_modify_ldt
@@ -165,14 +165,14 @@ ENTRY(sys_call_table)
165 .long sys_mremap 165 .long sys_mremap
166 .long sys_setresuid16 166 .long sys_setresuid16
167 .long sys_getresuid16 /* 165 */ 167 .long sys_getresuid16 /* 165 */
168 .long sys_vm86 168 .long ptregs_vm86
169 .long sys_ni_syscall /* Old sys_query_module */ 169 .long sys_ni_syscall /* Old sys_query_module */
170 .long sys_poll 170 .long sys_poll
171 .long sys_nfsservctl 171 .long sys_nfsservctl
172 .long sys_setresgid16 /* 170 */ 172 .long sys_setresgid16 /* 170 */
173 .long sys_getresgid16 173 .long sys_getresgid16
174 .long sys_prctl 174 .long sys_prctl
175 .long sys_rt_sigreturn 175 .long ptregs_rt_sigreturn
176 .long sys_rt_sigaction 176 .long sys_rt_sigaction
177 .long sys_rt_sigprocmask /* 175 */ 177 .long sys_rt_sigprocmask /* 175 */
178 .long sys_rt_sigpending 178 .long sys_rt_sigpending
@@ -185,11 +185,11 @@ ENTRY(sys_call_table)
185 .long sys_getcwd 185 .long sys_getcwd
186 .long sys_capget 186 .long sys_capget
187 .long sys_capset /* 185 */ 187 .long sys_capset /* 185 */
188 .long sys_sigaltstack 188 .long ptregs_sigaltstack
189 .long sys_sendfile 189 .long sys_sendfile
190 .long sys_ni_syscall /* reserved for streams1 */ 190 .long sys_ni_syscall /* reserved for streams1 */
191 .long sys_ni_syscall /* reserved for streams2 */ 191 .long sys_ni_syscall /* reserved for streams2 */
192 .long sys_vfork /* 190 */ 192 .long ptregs_vfork /* 190 */
193 .long sys_getrlimit 193 .long sys_getrlimit
194 .long sys_mmap2 194 .long sys_mmap2
195 .long sys_truncate64 195 .long sys_truncate64
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 3985cac0ed4..5c5d87f0b2e 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -33,12 +33,12 @@
33#include <linux/time.h> 33#include <linux/time.h>
34#include <linux/mca.h> 34#include <linux/mca.h>
35 35
36#include <asm/arch_hooks.h> 36#include <asm/setup.h>
37#include <asm/hpet.h> 37#include <asm/hpet.h>
38#include <asm/time.h> 38#include <asm/time.h>
39#include <asm/timer.h> 39#include <asm/timer.h>
40 40
41#include "do_timer.h" 41#include <asm/do_timer.h>
42 42
43int timer_ack; 43int timer_ack;
44 44
@@ -118,7 +118,7 @@ void __init hpet_time_init(void)
118{ 118{
119 if (!hpet_enable()) 119 if (!hpet_enable())
120 setup_pit_timer(); 120 setup_pit_timer();
121 time_init_hook(); 121 x86_quirk_time_init();
122} 122}
123 123
124/* 124/*
@@ -131,7 +131,7 @@ void __init hpet_time_init(void)
131 */ 131 */
132void __init time_init(void) 132void __init time_init(void)
133{ 133{
134 pre_time_init_hook(); 134 x86_quirk_pre_time_init();
135 tsc_init(); 135 tsc_init();
136 late_time_init = choose_time_init(); 136 late_time_init = choose_time_init();
137} 137}
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
deleted file mode 100644
index ce505464224..00000000000
--- a/arch/x86/kernel/tlb_32.c
+++ /dev/null
@@ -1,256 +0,0 @@
1#include <linux/spinlock.h>
2#include <linux/cpu.h>
3#include <linux/interrupt.h>
4
5#include <asm/tlbflush.h>
6
7DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
8 ____cacheline_aligned = { &init_mm, 0, };
9
10/* must come after the send_IPI functions above for inlining */
11#include <mach_ipi.h>
12
13/*
14 * Smarter SMP flushing macros.
15 * c/o Linus Torvalds.
16 *
17 * These mean you can really definitely utterly forget about
18 * writing to user space from interrupts. (It's not allowed anyway).
19 *
20 * Optimizations Manfred Spraul <manfred@colorfullife.com>
21 */
22
23static cpumask_t flush_cpumask;
24static struct mm_struct *flush_mm;
25static unsigned long flush_va;
26static DEFINE_SPINLOCK(tlbstate_lock);
27
28/*
29 * We cannot call mmdrop() because we are in interrupt context,
30 * instead update mm->cpu_vm_mask.
31 *
32 * We need to reload %cr3 since the page tables may be going
33 * away from under us..
34 */
35void leave_mm(int cpu)
36{
37 BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
38 cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
39 load_cr3(swapper_pg_dir);
40}
41EXPORT_SYMBOL_GPL(leave_mm);
42
43/*
44 *
45 * The flush IPI assumes that a thread switch happens in this order:
46 * [cpu0: the cpu that switches]
47 * 1) switch_mm() either 1a) or 1b)
48 * 1a) thread switch to a different mm
49 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
50 * Stop ipi delivery for the old mm. This is not synchronized with
51 * the other cpus, but smp_invalidate_interrupt ignores flush ipis
52 * for the wrong mm, and in the worst case we perform a superfluous
53 * tlb flush.
54 * 1a2) set cpu_tlbstate to TLBSTATE_OK
55 * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
56 * was in lazy tlb mode.
57 * 1a3) update cpu_tlbstate[].active_mm
58 * Now cpu0 accepts tlb flushes for the new mm.
59 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
60 * Now the other cpus will send tlb flush ipis.
61 * 1a4) change cr3.
62 * 1b) thread switch without mm change
63 * cpu_tlbstate[].active_mm is correct, cpu0 already handles
64 * flush ipis.
65 * 1b1) set cpu_tlbstate to TLBSTATE_OK
66 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
67 * Atomically set the bit [other cpus will start sending flush ipis],
68 * and test the bit.
69 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
70 * 2) switch %%esp, ie current
71 *
72 * The interrupt must handle 2 special cases:
73 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
74 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
75 * runs in kernel space, the cpu could load tlb entries for user space
76 * pages.
77 *
78 * The good news is that cpu_tlbstate is local to each cpu, no
79 * write/read ordering problems.
80 */
81
82/*
83 * TLB flush IPI:
84 *
85 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
86 * 2) Leave the mm if we are in the lazy tlb mode.
87 */
88
89void smp_invalidate_interrupt(struct pt_regs *regs)
90{
91 unsigned long cpu;
92
93 cpu = get_cpu();
94
95 if (!cpu_isset(cpu, flush_cpumask))
96 goto out;
97 /*
98 * This was a BUG() but until someone can quote me the
99 * line from the intel manual that guarantees an IPI to
100 * multiple CPUs is retried _only_ on the erroring CPUs
101 * its staying as a return
102 *
103 * BUG();
104 */
105
106 if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
107 if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
108 if (flush_va == TLB_FLUSH_ALL)
109 local_flush_tlb();
110 else
111 __flush_tlb_one(flush_va);
112 } else
113 leave_mm(cpu);
114 }
115 ack_APIC_irq();
116 smp_mb__before_clear_bit();
117 cpu_clear(cpu, flush_cpumask);
118 smp_mb__after_clear_bit();
119out:
120 put_cpu_no_resched();
121 inc_irq_stat(irq_tlb_count);
122}
123
124void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
125 unsigned long va)
126{
127 cpumask_t cpumask = *cpumaskp;
128
129 /*
130 * A couple of (to be removed) sanity checks:
131 *
132 * - current CPU must not be in mask
133 * - mask must exist :)
134 */
135 BUG_ON(cpus_empty(cpumask));
136 BUG_ON(cpu_isset(smp_processor_id(), cpumask));
137 BUG_ON(!mm);
138
139#ifdef CONFIG_HOTPLUG_CPU
140 /* If a CPU which we ran on has gone down, OK. */
141 cpus_and(cpumask, cpumask, cpu_online_map);
142 if (unlikely(cpus_empty(cpumask)))
143 return;
144#endif
145
146 /*
147 * i'm not happy about this global shared spinlock in the
148 * MM hot path, but we'll see how contended it is.
149 * AK: x86-64 has a faster method that could be ported.
150 */
151 spin_lock(&tlbstate_lock);
152
153 flush_mm = mm;
154 flush_va = va;
155 cpus_or(flush_cpumask, cpumask, flush_cpumask);
156
157 /*
158 * Make the above memory operations globally visible before
159 * sending the IPI.
160 */
161 smp_mb();
162 /*
163 * We have to send the IPI only to
164 * CPUs affected.
165 */
166 send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
167
168 while (!cpus_empty(flush_cpumask))
169 /* nothing. lockup detection does not belong here */
170 cpu_relax();
171
172 flush_mm = NULL;
173 flush_va = 0;
174 spin_unlock(&tlbstate_lock);
175}
176
177void flush_tlb_current_task(void)
178{
179 struct mm_struct *mm = current->mm;
180 cpumask_t cpu_mask;
181
182 preempt_disable();
183 cpu_mask = mm->cpu_vm_mask;
184 cpu_clear(smp_processor_id(), cpu_mask);
185
186 local_flush_tlb();
187 if (!cpus_empty(cpu_mask))
188 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
189 preempt_enable();
190}
191
192void flush_tlb_mm(struct mm_struct *mm)
193{
194 cpumask_t cpu_mask;
195
196 preempt_disable();
197 cpu_mask = mm->cpu_vm_mask;
198 cpu_clear(smp_processor_id(), cpu_mask);
199
200 if (current->active_mm == mm) {
201 if (current->mm)
202 local_flush_tlb();
203 else
204 leave_mm(smp_processor_id());
205 }
206 if (!cpus_empty(cpu_mask))
207 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
208
209 preempt_enable();
210}
211
212void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
213{
214 struct mm_struct *mm = vma->vm_mm;
215 cpumask_t cpu_mask;
216
217 preempt_disable();
218 cpu_mask = mm->cpu_vm_mask;
219 cpu_clear(smp_processor_id(), cpu_mask);
220
221 if (current->active_mm == mm) {
222 if (current->mm)
223 __flush_tlb_one(va);
224 else
225 leave_mm(smp_processor_id());
226 }
227
228 if (!cpus_empty(cpu_mask))
229 flush_tlb_others(cpu_mask, mm, va);
230
231 preempt_enable();
232}
233EXPORT_SYMBOL(flush_tlb_page);
234
235static void do_flush_tlb_all(void *info)
236{
237 unsigned long cpu = smp_processor_id();
238
239 __flush_tlb_all();
240 if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
241 leave_mm(cpu);
242}
243
244void flush_tlb_all(void)
245{
246 on_each_cpu(do_flush_tlb_all, NULL, 1);
247}
248
249void reset_lazy_tlbstate(void)
250{
251 int cpu = raw_smp_processor_id();
252
253 per_cpu(cpu_tlbstate, cpu).state = 0;
254 per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
255}
256
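
The deleted 32-bit flusher (and its 64-bit twin below) both hinge on the lazy-TLB trick spelled out in the long comment: a CPU that merely borrowed an mm does not flush on an IPI, it leaves the mm entirely, clearing itself from cpu_vm_mask so later flushes skip it. The handler's decision, reduced to its shape (the this_cpu_* accessors are hypothetical stand-ins for the per-cpu/PDA reads above):

    extern struct mm_struct *this_cpu_active_mm(void);   /* hypothetical */
    extern int this_cpu_tlbstate(void);                  /* hypothetical */

    static void sketch_handle_flush(struct mm_struct *mm, unsigned long va,
                                    int cpu)
    {
            if (mm != this_cpu_active_mm())
                    return;                      /* flush for another mm */

            if (this_cpu_tlbstate() == TLBSTATE_OK) {
                    if (va == TLB_FLUSH_ALL)
                            local_flush_tlb();
                    else
                            __flush_tlb_one(va);
            } else {
                    leave_mm(cpu);               /* lazy: opt out instead */
            }
    }
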
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
deleted file mode 100644
index f8be6f1d2e4..00000000000
--- a/arch/x86/kernel/tlb_64.c
+++ /dev/null
@@ -1,284 +0,0 @@
1#include <linux/init.h>
2
3#include <linux/mm.h>
4#include <linux/delay.h>
5#include <linux/spinlock.h>
6#include <linux/smp.h>
7#include <linux/kernel_stat.h>
8#include <linux/mc146818rtc.h>
9#include <linux/interrupt.h>
10
11#include <asm/mtrr.h>
12#include <asm/pgalloc.h>
13#include <asm/tlbflush.h>
14#include <asm/mmu_context.h>
15#include <asm/proto.h>
16#include <asm/apicdef.h>
17#include <asm/idle.h>
18#include <asm/uv/uv_hub.h>
19#include <asm/uv/uv_bau.h>
20
21#include <mach_ipi.h>
22/*
23 * Smarter SMP flushing macros.
24 * c/o Linus Torvalds.
25 *
26 * These mean you can really definitely utterly forget about
27 * writing to user space from interrupts. (It's not allowed anyway).
28 *
29 * Optimizations Manfred Spraul <manfred@colorfullife.com>
30 *
31 * More scalable flush, from Andi Kleen
32 *
33 * To avoid global state use 8 different call vectors.
34 * Each CPU uses a specific vector to trigger flushes on other
35 * CPUs. Depending on the received vector the target CPUs look into
36 * the right per cpu variable for the flush data.
37 *
38 * With more than 8 CPUs they are hashed to the 8 available
39 * vectors. The limited global vector space forces us to this right now.
40 * In future when interrupts are split into per CPU domains this could be
41 * fixed, at the cost of triggering multiple IPIs in some cases.
42 */
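
The scalability trick this comment describes is per-sender state: eight invalidate vectors, each with its own flush_state slot, and senders hashed onto them, so two CPUs only contend on a spinlock when they hash to the same slot. Both directions of the mapping, sketched from the code below:

    static int sender_slot(int cpu)
    {
            /* sender side: pick a vector/slot by CPU number */
            return cpu % NUM_INVALIDATE_TLB_VECTORS;        /* 8 slots */
    }

    static int receiver_slot(struct pt_regs *regs)
    {
            /* receiver side: orig_ax holds the negated vector number,
             * which recovers the sender's slot */
            return ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
    }
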
43
44union smp_flush_state {
45 struct {
46 cpumask_t flush_cpumask;
47 struct mm_struct *flush_mm;
48 unsigned long flush_va;
49 spinlock_t tlbstate_lock;
50 };
51 char pad[SMP_CACHE_BYTES];
52} ____cacheline_aligned;
53
54/* State is put into the per CPU data section, but padded
55 to a full cache line because other CPUs can access it and we don't
56 want false sharing in the per cpu data segment. */
57static DEFINE_PER_CPU(union smp_flush_state, flush_state);
58
59/*
60 * We cannot call mmdrop() because we are in interrupt context,
61 * instead update mm->cpu_vm_mask.
62 */
63void leave_mm(int cpu)
64{
65 if (read_pda(mmu_state) == TLBSTATE_OK)
66 BUG();
67 cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
68 load_cr3(swapper_pg_dir);
69}
70EXPORT_SYMBOL_GPL(leave_mm);
71
72/*
73 *
74 * The flush IPI assumes that a thread switch happens in this order:
75 * [cpu0: the cpu that switches]
76 * 1) switch_mm() either 1a) or 1b)
77 * 1a) thread switch to a different mm
78 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
79 * Stop ipi delivery for the old mm. This is not synchronized with
 80 * the other cpus, but smp_invalidate_interrupt ignores flush ipis
81 * for the wrong mm, and in the worst case we perform a superfluous
82 * tlb flush.
83 * 1a2) set cpu mmu_state to TLBSTATE_OK
84 * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
85 * was in lazy tlb mode.
86 * 1a3) update cpu active_mm
87 * Now cpu0 accepts tlb flushes for the new mm.
88 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
89 * Now the other cpus will send tlb flush ipis.
 90 * 1a5) change cr3.
91 * 1b) thread switch without mm change
92 * cpu active_mm is correct, cpu0 already handles
93 * flush ipis.
94 * 1b1) set cpu mmu_state to TLBSTATE_OK
95 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
96 * Atomically set the bit [other cpus will start sending flush ipis],
97 * and test the bit.
98 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 99 * 2) switch %%esp, i.e. current
 100 *
 101 * The interrupt must handle 2 special cases:
 102 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
103 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
104 * runs in kernel space, the cpu could load tlb entries for user space
105 * pages.
106 *
107 * The good news is that cpu mmu_state is local to each cpu, no
108 * write/read ordering problems.
109 */
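Case 1b2/1b3 above hinges on test_and_set semantics; a sketch with C11 atomics, modelling cpu_vm_mask as a plain unsigned long bitmask (illustrative, not the kernel's cpumask type):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong cpu_vm_mask;   /* model of mm->cpu_vm_mask */

/* Steps 1b2/1b3: atomically set our bit and look at the old value.
 * If the bit was clear, leave_mm() ran meanwhile and we may have
 * missed flush IPIs, so we must flush the TLB locally. */
static void switch_mm_same_mm(int cpu)
{
	unsigned long old = atomic_fetch_or(&cpu_vm_mask, 1UL << cpu);

	if (!(old & (1UL << cpu)))
		printf("cpu %d: bit was clear -> local_flush_tlb()\n", cpu);
	else
		printf("cpu %d: bit already set -> nothing to do\n", cpu);
}

int main(void)
{
	switch_mm_same_mm(3);   /* bit was clear: flush   */
	switch_mm_same_mm(3);   /* bit set: no flush      */
	return 0;
}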
110
111/*
112 * TLB flush IPI:
113 *
114 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
115 * 2) Leave the mm if we are in the lazy tlb mode.
116 *
117 * Interrupts are disabled.
118 */
119
120asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
121{
122 int cpu;
123 int sender;
124 union smp_flush_state *f;
125
126 cpu = smp_processor_id();
127 /*
128 * orig_rax contains the negated interrupt vector.
129 * Use that to determine where the sender put the data.
130 */
131 sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
132 f = &per_cpu(flush_state, sender);
133
134 if (!cpu_isset(cpu, f->flush_cpumask))
135 goto out;
136 /*
 137 * This was a BUG(), but until someone can quote me the
 138 * line from the Intel manual that guarantees an IPI to
 139 * multiple CPUs is retried _only_ on the erroring CPUs,
 140 * it's staying as a return
141 *
142 * BUG();
143 */
144
145 if (f->flush_mm == read_pda(active_mm)) {
146 if (read_pda(mmu_state) == TLBSTATE_OK) {
147 if (f->flush_va == TLB_FLUSH_ALL)
148 local_flush_tlb();
149 else
150 __flush_tlb_one(f->flush_va);
151 } else
152 leave_mm(cpu);
153 }
154out:
155 ack_APIC_irq();
156 cpu_clear(cpu, f->flush_cpumask);
157 inc_irq_stat(irq_tlb_count);
158}
159
160void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
161 unsigned long va)
162{
163 int sender;
164 union smp_flush_state *f;
165 cpumask_t cpumask = *cpumaskp;
166
167 if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
168 return;
169
170 /* Caller has disabled preemption */
171 sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
172 f = &per_cpu(flush_state, sender);
173
174 /*
175 * Could avoid this lock when
176 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
177 * probably not worth checking this for a cache-hot lock.
178 */
179 spin_lock(&f->tlbstate_lock);
180
181 f->flush_mm = mm;
182 f->flush_va = va;
183 cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
184
185 /*
186 * Make the above memory operations globally visible before
187 * sending the IPI.
188 */
189 smp_mb();
190 /*
 191 * We have to send the IPI only to
 192 * the affected CPUs.
193 */
194 send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
195
196 while (!cpus_empty(f->flush_cpumask))
197 cpu_relax();
198
199 f->flush_mm = NULL;
200 f->flush_va = 0;
201 spin_unlock(&f->tlbstate_lock);
202}
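The sender/receiver handshake implemented by native_flush_tlb_others() and smp_invalidate_interrupt() above, modelled sequentially with C11 atomics; __builtin_ctzl is a GCC builtin, and the "remote" handlers are simply called inline here:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong flush_cpumask;   /* per-vector "who still owes a flush" */

/* Receiver side (smp_invalidate_interrupt above): flush, then ack by
 * clearing our own bit so the sender's spin loop can make progress. */
static void receiver(int cpu)
{
	printf("cpu %d: flush + ack\n", cpu);
	atomic_fetch_and(&flush_cpumask, ~(1UL << cpu));
}

/* Sender side (native_flush_tlb_others above): publish the target mask,
 * fence, "send the IPI", then wait for all acks. */
static void sender(unsigned long targets)
{
	atomic_fetch_or(&flush_cpumask, targets);    /* cpus_or()        */
	atomic_thread_fence(memory_order_seq_cst);   /* smp_mb()         */
	while (atomic_load(&flush_cpumask)) {        /* cpu_relax() loop */
		/* stand-in for the IPI handlers running remotely: */
		receiver(__builtin_ctzl(atomic_load(&flush_cpumask)));
	}
	printf("all targets acked\n");
}

int main(void)
{
	sender((1UL << 1) | (1UL << 3));   /* flush cpus 1 and 3 */
	return 0;
}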
203
204static int __cpuinit init_smp_flush(void)
205{
206 int i;
207
208 for_each_possible_cpu(i)
209 spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
210
211 return 0;
212}
213core_initcall(init_smp_flush);
214
215void flush_tlb_current_task(void)
216{
217 struct mm_struct *mm = current->mm;
218 cpumask_t cpu_mask;
219
220 preempt_disable();
221 cpu_mask = mm->cpu_vm_mask;
222 cpu_clear(smp_processor_id(), cpu_mask);
223
224 local_flush_tlb();
225 if (!cpus_empty(cpu_mask))
226 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
227 preempt_enable();
228}
229
230void flush_tlb_mm(struct mm_struct *mm)
231{
232 cpumask_t cpu_mask;
233
234 preempt_disable();
235 cpu_mask = mm->cpu_vm_mask;
236 cpu_clear(smp_processor_id(), cpu_mask);
237
238 if (current->active_mm == mm) {
239 if (current->mm)
240 local_flush_tlb();
241 else
242 leave_mm(smp_processor_id());
243 }
244 if (!cpus_empty(cpu_mask))
245 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
246
247 preempt_enable();
248}
249
250void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
251{
252 struct mm_struct *mm = vma->vm_mm;
253 cpumask_t cpu_mask;
254
255 preempt_disable();
256 cpu_mask = mm->cpu_vm_mask;
257 cpu_clear(smp_processor_id(), cpu_mask);
258
259 if (current->active_mm == mm) {
260 if (current->mm)
261 __flush_tlb_one(va);
262 else
263 leave_mm(smp_processor_id());
264 }
265
266 if (!cpus_empty(cpu_mask))
267 flush_tlb_others(cpu_mask, mm, va);
268
269 preempt_enable();
270}
271
272static void do_flush_tlb_all(void *info)
273{
274 unsigned long cpu = smp_processor_id();
275
276 __flush_tlb_all();
277 if (read_pda(mmu_state) == TLBSTATE_LAZY)
278 leave_mm(cpu);
279}
280
281void flush_tlb_all(void)
282{
283 on_each_cpu(do_flush_tlb_all, NULL, 1);
284}
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 6812b829ed8..d038b9c45cf 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -11,16 +11,15 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12 12
13#include <asm/mmu_context.h> 13#include <asm/mmu_context.h>
14#include <asm/uv/uv.h>
14#include <asm/uv/uv_mmrs.h> 15#include <asm/uv/uv_mmrs.h>
15#include <asm/uv/uv_hub.h> 16#include <asm/uv/uv_hub.h>
16#include <asm/uv/uv_bau.h> 17#include <asm/uv/uv_bau.h>
17#include <asm/genapic.h> 18#include <asm/apic.h>
18#include <asm/idle.h> 19#include <asm/idle.h>
19#include <asm/tsc.h> 20#include <asm/tsc.h>
20#include <asm/irq_vectors.h> 21#include <asm/irq_vectors.h>
21 22
22#include <mach_apic.h>
23
24static struct bau_control **uv_bau_table_bases __read_mostly; 23static struct bau_control **uv_bau_table_bases __read_mostly;
25static int uv_bau_retry_limit __read_mostly; 24static int uv_bau_retry_limit __read_mostly;
26 25
@@ -210,14 +209,15 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
210 * 209 *
211 * Send a broadcast and wait for a broadcast message to complete. 210 * Send a broadcast and wait for a broadcast message to complete.
212 * 211 *
213 * The cpumaskp mask contains the cpus the broadcast was sent to. 212 * The flush_mask contains the cpus the broadcast was sent to.
214 * 213 *
215 * Returns 1 if all remote flushing was done. The mask is zeroed. 214 * Returns NULL if all remote flushing was done. The mask is zeroed.
216 * Returns 0 if some remote flushing remains to be done. The mask is left 215 * Returns @flush_mask if some remote flushing remains to be done. The
217 * unchanged. 216 * mask will have some bits still set.
218 */ 217 */
219int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc, 218const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
220 cpumask_t *cpumaskp) 219 struct bau_desc *bau_desc,
220 struct cpumask *flush_mask)
221{ 221{
222 int completion_status = 0; 222 int completion_status = 0;
223 int right_shift; 223 int right_shift;
@@ -257,66 +257,74 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
257 * the cpu's, all of which are still in the mask. 257 * the cpu's, all of which are still in the mask.
258 */ 258 */
259 __get_cpu_var(ptcstats).ptc_i++; 259 __get_cpu_var(ptcstats).ptc_i++;
260 return 0; 260 return flush_mask;
261 } 261 }
262 262
263 /* 263 /*
264 * Success, so clear the remote cpu's from the mask so we don't 264 * Success, so clear the remote cpu's from the mask so we don't
265 * use the IPI method of shootdown on them. 265 * use the IPI method of shootdown on them.
266 */ 266 */
267 for_each_cpu_mask(bit, *cpumaskp) { 267 for_each_cpu(bit, flush_mask) {
268 blade = uv_cpu_to_blade_id(bit); 268 blade = uv_cpu_to_blade_id(bit);
269 if (blade == this_blade) 269 if (blade == this_blade)
270 continue; 270 continue;
271 cpu_clear(bit, *cpumaskp); 271 cpumask_clear_cpu(bit, flush_mask);
272 } 272 }
273 if (!cpus_empty(*cpumaskp)) 273 if (!cpumask_empty(flush_mask))
274 return 0; 274 return flush_mask;
275 return 1; 275 return NULL;
276} 276}
277 277
278/** 278/**
279 * uv_flush_tlb_others - globally purge translation cache of a virtual 279 * uv_flush_tlb_others - globally purge translation cache of a virtual
280 * address or all TLB's 280 * address or all TLB's
281 * @cpumaskp: mask of all cpu's in which the address is to be removed 281 * @cpumask: mask of all cpu's in which the address is to be removed
282 * @mm: mm_struct containing virtual address range 282 * @mm: mm_struct containing virtual address range
283 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu) 283 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
284 * @cpu: the current cpu
284 * 285 *
285 * This is the entry point for initiating any UV global TLB shootdown. 286 * This is the entry point for initiating any UV global TLB shootdown.
286 * 287 *
287 * Purges the translation caches of all specified processors of the given 288 * Purges the translation caches of all specified processors of the given
288 * virtual address, or purges all TLB's on specified processors. 289 * virtual address, or purges all TLB's on specified processors.
289 * 290 *
290 * The caller has derived the cpumaskp from the mm_struct and has subtracted 291 * The caller has derived the cpumask from the mm_struct. This function
291 * the local cpu from the mask. This function is called only if there 292 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
292 * are bits set in the mask. (e.g. flush_tlb_page())
293 * 293 *
294 * The cpumaskp is converted into a nodemask of the nodes containing 294 * The cpumask is converted into a nodemask of the nodes containing
295 * the cpus. 295 * the cpus.
296 * 296 *
297 * Returns 1 if all remote flushing was done. 297 * Note that this function should be called with preemption disabled.
298 * Returns 0 if some remote flushing remains to be done. 298 *
299 * Returns NULL if all remote flushing was done.
300 * Returns pointer to cpumask if some remote flushing remains to be
301 * done. The returned pointer is valid till preemption is re-enabled.
299 */ 302 */
300int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm, 303const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
301 unsigned long va) 304 struct mm_struct *mm,
305 unsigned long va, unsigned int cpu)
302{ 306{
307 static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
308 struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
303 int i; 309 int i;
304 int bit; 310 int bit;
305 int blade; 311 int blade;
306 int cpu; 312 int uv_cpu;
307 int this_blade; 313 int this_blade;
308 int locals = 0; 314 int locals = 0;
309 struct bau_desc *bau_desc; 315 struct bau_desc *bau_desc;
310 316
311 cpu = uv_blade_processor_id(); 317 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
318
319 uv_cpu = uv_blade_processor_id();
312 this_blade = uv_numa_blade_id(); 320 this_blade = uv_numa_blade_id();
313 bau_desc = __get_cpu_var(bau_control).descriptor_base; 321 bau_desc = __get_cpu_var(bau_control).descriptor_base;
314 bau_desc += UV_ITEMS_PER_DESCRIPTOR * cpu; 322 bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
315 323
316 bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 324 bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
317 325
318 i = 0; 326 i = 0;
319 for_each_cpu_mask(bit, *cpumaskp) { 327 for_each_cpu(bit, flush_mask) {
320 blade = uv_cpu_to_blade_id(bit); 328 blade = uv_cpu_to_blade_id(bit);
321 BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1)); 329 BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
322 if (blade == this_blade) { 330 if (blade == this_blade) {
@@ -331,17 +339,17 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
331 * no off_node flushing; return status for local node 339 * no off_node flushing; return status for local node
332 */ 340 */
333 if (locals) 341 if (locals)
334 return 0; 342 return flush_mask;
335 else 343 else
336 return 1; 344 return NULL;
337 } 345 }
338 __get_cpu_var(ptcstats).requestor++; 346 __get_cpu_var(ptcstats).requestor++;
339 __get_cpu_var(ptcstats).ntargeted += i; 347 __get_cpu_var(ptcstats).ntargeted += i;
340 348
341 bau_desc->payload.address = va; 349 bau_desc->payload.address = va;
342 bau_desc->payload.sending_cpu = smp_processor_id(); 350 bau_desc->payload.sending_cpu = cpu;
343 351
344 return uv_flush_send_and_wait(cpu, this_blade, bau_desc, cpumaskp); 352 return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
345} 353}
346 354
347/* 355/*
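The new return convention lets a caller fall back to the classic IPI shootdown for exactly the CPUs the BAU did not cover. A hypothetical user-space model of that caller pattern (the "hardware" here arbitrarily handles only even-numbered cpus):

#include <stdio.h>
#include <stddef.h>

typedef unsigned long cpumask_model;

static cpumask_model leftover;

/* Model of the new interface: NULL means everything was handled,
 * a non-NULL mask names the cpus that still need flushing. */
static cpumask_model *try_hw_flush(cpumask_model targets)
{
	leftover = targets & 0xAAAAAAAAUL;   /* odd cpus remain */
	return leftover ? &leftover : NULL;
}

int main(void)
{
	cpumask_model *rest = try_hw_flush(0xF);   /* cpus 0-3 */

	if (rest)
		printf("fall back to IPIs for mask %#lx\n", *rest);
	else
		printf("BAU handled everything\n");
	return 0;
}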
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
index d8ccc3c6552..66d874e5404 100644
--- a/arch/x86/kernel/trampoline_32.S
+++ b/arch/x86/kernel/trampoline_32.S
@@ -29,7 +29,7 @@
29 29
30#include <linux/linkage.h> 30#include <linux/linkage.h>
31#include <asm/segment.h> 31#include <asm/segment.h>
32#include <asm/page.h> 32#include <asm/page_types.h>
33 33
34/* We can free up trampoline after bootup if cpu hotplug is not supported. */ 34/* We can free up trampoline after bootup if cpu hotplug is not supported. */
35#ifndef CONFIG_HOTPLUG_CPU 35#ifndef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 894293c598d..cddfb8d386b 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -25,10 +25,11 @@
25 */ 25 */
26 26
27#include <linux/linkage.h> 27#include <linux/linkage.h>
28#include <asm/pgtable.h> 28#include <asm/pgtable_types.h>
29#include <asm/page.h> 29#include <asm/page_types.h>
30#include <asm/msr.h> 30#include <asm/msr.h>
31#include <asm/segment.h> 31#include <asm/segment.h>
32#include <asm/processor-flags.h>
32 33
33.section .rodata, "a", @progbits 34.section .rodata, "a", @progbits
34 35
@@ -37,7 +38,7 @@
37ENTRY(trampoline_data) 38ENTRY(trampoline_data)
38r_base = . 39r_base = .
39 cli # We should be safe anyway 40 cli # We should be safe anyway
40 wbinvd 41 wbinvd
41 mov %cs, %ax # Code and data in the same place 42 mov %cs, %ax # Code and data in the same place
42 mov %ax, %ds 43 mov %ax, %ds
43 mov %ax, %es 44 mov %ax, %es
@@ -73,9 +74,8 @@ r_base = .
73 lidtl tidt - r_base # load idt with 0, 0 74 lidtl tidt - r_base # load idt with 0, 0
74 lgdtl tgdt - r_base # load gdt with whatever is appropriate 75 lgdtl tgdt - r_base # load gdt with whatever is appropriate
75 76
76 xor %ax, %ax 77 mov $X86_CR0_PE, %ax # protected mode (PE) bit
77 inc %ax # protected mode (PE) bit 78 lmsw %ax # into protected mode
78 lmsw %ax # into protected mode
79 79
80 # flush prefetch and jump to startup_32 80 # flush prefetch and jump to startup_32
81 ljmpl *(startup_32_vector - r_base) 81 ljmpl *(startup_32_vector - r_base)
@@ -86,9 +86,8 @@ startup_32:
86 movl $__KERNEL_DS, %eax # Initialize the %ds segment register 86 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
87 movl %eax, %ds 87 movl %eax, %ds
88 88
89 xorl %eax, %eax 89 movl $X86_CR4_PAE, %eax
90 btsl $5, %eax # Enable PAE mode 90 movl %eax, %cr4 # Enable PAE mode
91 movl %eax, %cr4
92 91
93 # Setup trampoline 4 level pagetables 92 # Setup trampoline 4 level pagetables
94 leal (trampoline_level4_pgt - r_base)(%esi), %eax 93 leal (trampoline_level4_pgt - r_base)(%esi), %eax
@@ -99,9 +98,9 @@ startup_32:
99 xorl %edx, %edx 98 xorl %edx, %edx
100 wrmsr 99 wrmsr
101 100
102 xorl %eax, %eax 101 # Enable paging and in turn activate Long Mode
103 btsl $31, %eax # Enable paging and in turn activate Long Mode 102 # Enable protected mode
104 btsl $0, %eax # Enable protected mode 103 movl $(X86_CR0_PG | X86_CR0_PE), %eax
105 movl %eax, %cr0 104 movl %eax, %cr0
106 105
107 /* 106 /*
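For reference, the named constants the rewritten trampoline uses, next to the raw bit positions the old xor/inc/btsl sequences encoded by hand; the bit values are architectural, the demo itself is illustrative:

#include <stdio.h>

#define X86_CR0_PE  (1UL << 0)    /* protection enable          */
#define X86_CR0_PG  (1UL << 31)   /* paging                     */
#define X86_CR4_PAE (1UL << 5)    /* physical address extension */

int main(void)
{
	printf("CR0_PE    = %#lx (old: xor %%ax + inc %%ax)\n", X86_CR0_PE);
	printf("CR4_PAE   = %#lx (old: btsl $5)\n", X86_CR4_PAE);
	printf("CR0 PG|PE = %#lx (old: btsl $31, btsl $0)\n",
	       X86_CR0_PG | X86_CR0_PE);
	return 0;
}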
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a9e7548e179..a1d288327ff 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -54,15 +54,14 @@
54#include <asm/desc.h> 54#include <asm/desc.h>
55#include <asm/i387.h> 55#include <asm/i387.h>
56 56
57#include <mach_traps.h> 57#include <asm/mach_traps.h>
58 58
59#ifdef CONFIG_X86_64 59#ifdef CONFIG_X86_64
60#include <asm/pgalloc.h> 60#include <asm/pgalloc.h>
61#include <asm/proto.h> 61#include <asm/proto.h>
62#include <asm/pda.h>
63#else 62#else
64#include <asm/processor-flags.h> 63#include <asm/processor-flags.h>
65#include <asm/arch_hooks.h> 64#include <asm/setup.h>
66#include <asm/traps.h> 65#include <asm/traps.h>
67 66
68#include "cpu/mcheck/mce.h" 67#include "cpu/mcheck/mce.h"
@@ -119,47 +118,6 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
119 if (!user_mode_vm(regs)) 118 if (!user_mode_vm(regs))
120 die(str, regs, err); 119 die(str, regs, err);
121} 120}
122
123/*
124 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
125 * invalid offset set (the LAZY one) and the faulting thread has
126 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS,
127 * we set the offset field correctly and return 1.
128 */
129static int lazy_iobitmap_copy(void)
130{
131 struct thread_struct *thread;
132 struct tss_struct *tss;
133 int cpu;
134
135 cpu = get_cpu();
136 tss = &per_cpu(init_tss, cpu);
137 thread = &current->thread;
138
139 if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
140 thread->io_bitmap_ptr) {
141 memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
142 thread->io_bitmap_max);
143 /*
144 * If the previously set map was extending to higher ports
145 * than the current one, pad extra space with 0xff (no access).
146 */
147 if (thread->io_bitmap_max < tss->io_bitmap_max) {
148 memset((char *) tss->io_bitmap +
149 thread->io_bitmap_max, 0xff,
150 tss->io_bitmap_max - thread->io_bitmap_max);
151 }
152 tss->io_bitmap_max = thread->io_bitmap_max;
153 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
154 tss->io_bitmap_owner = thread;
155 put_cpu();
156
157 return 1;
158 }
159 put_cpu();
160
161 return 0;
162}
163#endif 121#endif
164 122
165static void __kprobes 123static void __kprobes
@@ -310,11 +268,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
310 conditional_sti(regs); 268 conditional_sti(regs);
311 269
312#ifdef CONFIG_X86_32 270#ifdef CONFIG_X86_32
313 if (lazy_iobitmap_copy()) {
314 /* restart the faulting instruction */
315 return;
316 }
317
318 if (regs->flags & X86_VM_MASK) 271 if (regs->flags & X86_VM_MASK)
319 goto gp_in_vm86; 272 goto gp_in_vm86;
320#endif 273#endif
@@ -914,19 +867,20 @@ void math_emulate(struct math_emu_info *info)
914} 867}
915#endif /* CONFIG_MATH_EMULATION */ 868#endif /* CONFIG_MATH_EMULATION */
916 869
917dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs) 870dotraplinkage void __kprobes
871do_device_not_available(struct pt_regs *regs, long error_code)
918{ 872{
919#ifdef CONFIG_X86_32 873#ifdef CONFIG_X86_32
920 if (read_cr0() & X86_CR0_EM) { 874 if (read_cr0() & X86_CR0_EM) {
921 struct math_emu_info info = { }; 875 struct math_emu_info info = { };
922 876
923 conditional_sti(&regs); 877 conditional_sti(regs);
924 878
925 info.regs = &regs; 879 info.regs = regs;
926 math_emulate(&info); 880 math_emulate(&info);
927 } else { 881 } else {
928 math_state_restore(); /* interrupts still off */ 882 math_state_restore(); /* interrupts still off */
929 conditional_sti(&regs); 883 conditional_sti(regs);
930 } 884 }
931#else 885#else
932 math_state_restore(); 886 math_state_restore();
@@ -942,7 +896,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
942 info.si_signo = SIGILL; 896 info.si_signo = SIGILL;
943 info.si_errno = 0; 897 info.si_errno = 0;
944 info.si_code = ILL_BADSTK; 898 info.si_code = ILL_BADSTK;
945 info.si_addr = 0; 899 info.si_addr = NULL;
946 if (notify_die(DIE_TRAP, "iret exception", 900 if (notify_die(DIE_TRAP, "iret exception",
947 regs, error_code, 32, SIGILL) == NOTIFY_STOP) 901 regs, error_code, 32, SIGILL) == NOTIFY_STOP)
948 return; 902 return;
@@ -1026,6 +980,6 @@ void __init trap_init(void)
1026 cpu_init(); 980 cpu_init();
1027 981
1028#ifdef CONFIG_X86_32 982#ifdef CONFIG_X86_32
1029 trap_init_hook(); 983 x86_quirk_trap_init();
1030#endif 984#endif
1031} 985}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 599e5816863..83d53ce5d4c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -773,7 +773,7 @@ __cpuinit int unsynchronized_tsc(void)
773 if (!cpu_has_tsc || tsc_unstable) 773 if (!cpu_has_tsc || tsc_unstable)
774 return 1; 774 return 1;
775 775
776#ifdef CONFIG_X86_SMP 776#ifdef CONFIG_SMP
777 if (apic_is_clustered_box()) 777 if (apic_is_clustered_box())
778 return 1; 778 return 1;
779#endif 779#endif
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
new file mode 100644
index 00000000000..2ffb6c53326
--- /dev/null
+++ b/arch/x86/kernel/uv_time.c
@@ -0,0 +1,393 @@
1/*
2 * SGI RTC clock/timer routines.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Copyright (c) 2009 Silicon Graphics, Inc. All Rights Reserved.
19 * Copyright (c) Dimitri Sivanich
20 */
21#include <linux/clockchips.h>
22
23#include <asm/uv/uv_mmrs.h>
24#include <asm/uv/uv_hub.h>
25#include <asm/uv/bios.h>
26#include <asm/uv/uv.h>
27#include <asm/apic.h>
28#include <asm/cpu.h>
29
30#define RTC_NAME "sgi_rtc"
31
32static cycle_t uv_read_rtc(void);
33static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
34static void uv_rtc_timer_setup(enum clock_event_mode,
35 struct clock_event_device *);
36
37static struct clocksource clocksource_uv = {
38 .name = RTC_NAME,
39 .rating = 400,
40 .read = uv_read_rtc,
41 .mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
42 .shift = 10,
43 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
44};
45
46static struct clock_event_device clock_event_device_uv = {
47 .name = RTC_NAME,
48 .features = CLOCK_EVT_FEAT_ONESHOT,
49 .shift = 20,
50 .rating = 400,
51 .irq = -1,
52 .set_next_event = uv_rtc_next_event,
53 .set_mode = uv_rtc_timer_setup,
54 .event_handler = NULL,
55};
56
57static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
58
59/* There is one of these allocated per node */
60struct uv_rtc_timer_head {
61 spinlock_t lock;
62 /* next cpu waiting for timer, local node relative: */
63 int next_cpu;
64 /* number of cpus on this node: */
65 int ncpus;
66 struct {
67 int lcpu; /* systemwide logical cpu number */
68 u64 expires; /* next timer expiration for this cpu */
69 } cpu[1];
70};
71
72/*
73 * Access to uv_rtc_timer_head via blade id.
74 */
75static struct uv_rtc_timer_head **blade_info __read_mostly;
76
77static int uv_rtc_enable;
78
79/*
80 * Hardware interface routines
81 */
82
 83/* Send an IPI to a cpu, possibly on another node */
84static void uv_rtc_send_IPI(int cpu)
85{
86 unsigned long apicid, val;
87 int pnode;
88
89 apicid = cpu_physical_id(cpu);
90 pnode = uv_apicid_to_pnode(apicid);
91 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
92 (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
93 (GENERIC_INTERRUPT_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
94
95 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
96}
97
98/* Check for an RTC interrupt pending */
99static int uv_intr_pending(int pnode)
100{
101 return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
102 UVH_EVENT_OCCURRED0_RTC1_MASK;
103}
104
 105/* Set up the interrupt and return non-zero if early expiration occurred. */
106static int uv_setup_intr(int cpu, u64 expires)
107{
108 u64 val;
109 int pnode = uv_cpu_to_pnode(cpu);
110
111 uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
112 UVH_RTC1_INT_CONFIG_M_MASK);
113 uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
114
115 uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
116 UVH_EVENT_OCCURRED0_RTC1_MASK);
117
118 val = (GENERIC_INTERRUPT_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
119 ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
120
121 /* Set configuration */
122 uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
123 /* Initialize comparator value */
124 uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
125
126 return (expires < uv_read_rtc() && !uv_intr_pending(pnode));
127}
128
129/*
130 * Per-cpu timer tracking routines
131 */
132
133static __init void uv_rtc_deallocate_timers(void)
134{
135 int bid;
136
137 for_each_possible_blade(bid) {
138 kfree(blade_info[bid]);
139 }
140 kfree(blade_info);
141}
142
143/* Allocate per-node list of cpu timer expiration times. */
144static __init int uv_rtc_allocate_timers(void)
145{
146 int cpu;
147
148 blade_info = kmalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL);
149 if (!blade_info)
150 return -ENOMEM;
151 memset(blade_info, 0, uv_possible_blades * sizeof(void *));
152
153 for_each_present_cpu(cpu) {
154 int nid = cpu_to_node(cpu);
155 int bid = uv_cpu_to_blade_id(cpu);
156 int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
157 struct uv_rtc_timer_head *head = blade_info[bid];
158
159 if (!head) {
160 head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
161 (uv_blade_nr_possible_cpus(bid) *
162 2 * sizeof(u64)),
163 GFP_KERNEL, nid);
164 if (!head) {
165 uv_rtc_deallocate_timers();
166 return -ENOMEM;
167 }
168 spin_lock_init(&head->lock);
169 head->ncpus = uv_blade_nr_possible_cpus(bid);
170 head->next_cpu = -1;
171 blade_info[bid] = head;
172 }
173
174 head->cpu[bcpu].lcpu = cpu;
175 head->cpu[bcpu].expires = ULLONG_MAX;
176 }
177
178 return 0;
179}
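The allocation above sizes the flexible tail by hand: cpu[1] is already inside sizeof(), and each extra entry (an int plus a u64) pads to 16 bytes, i.e. 2 * sizeof(u64) on x86_64, so the kmalloc slightly over-allocates by one entry. A standalone model of the same sizing:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct timer_head {
	int next_cpu;
	int ncpus;
	struct {
		int lcpu;
		uint64_t expires;
	} cpu[1];                /* first entry counted in sizeof() */
};

int main(void)
{
	int ncpus = 8;
	size_t sz = sizeof(struct timer_head) +
		    ncpus * 2 * sizeof(uint64_t);
	struct timer_head *head = malloc(sz);

	if (!head)
		return 1;
	head->ncpus = ncpus;
	printf("allocated %zu bytes for %d cpus\n", sz, ncpus);
	free(head);
	return 0;
}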
180
181/* Find and set the next expiring timer. */
182static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
183{
184 u64 lowest = ULLONG_MAX;
185 int c, bcpu = -1;
186
187 head->next_cpu = -1;
188 for (c = 0; c < head->ncpus; c++) {
189 u64 exp = head->cpu[c].expires;
190 if (exp < lowest) {
191 bcpu = c;
192 lowest = exp;
193 }
194 }
195 if (bcpu >= 0) {
196 head->next_cpu = bcpu;
197 c = head->cpu[bcpu].lcpu;
198 if (uv_setup_intr(c, lowest))
199 /* If we didn't set it up in time, trigger */
200 uv_rtc_send_IPI(c);
201 } else {
202 uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
203 UVH_RTC1_INT_CONFIG_M_MASK);
204 }
205}
206
207/*
208 * Set expiration time for current cpu.
209 *
210 * Returns 1 if we missed the expiration time.
211 */
212static int uv_rtc_set_timer(int cpu, u64 expires)
213{
214 int pnode = uv_cpu_to_pnode(cpu);
215 int bid = uv_cpu_to_blade_id(cpu);
216 struct uv_rtc_timer_head *head = blade_info[bid];
217 int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
218 u64 *t = &head->cpu[bcpu].expires;
219 unsigned long flags;
220 int next_cpu;
221
222 spin_lock_irqsave(&head->lock, flags);
223
224 next_cpu = head->next_cpu;
225 *t = expires;
226 /* Will this one be next to go off? */
227 if (next_cpu < 0 || bcpu == next_cpu ||
228 expires < head->cpu[next_cpu].expires) {
229 head->next_cpu = bcpu;
230 if (uv_setup_intr(cpu, expires)) {
231 *t = ULLONG_MAX;
232 uv_rtc_find_next_timer(head, pnode);
233 spin_unlock_irqrestore(&head->lock, flags);
234 return 1;
235 }
236 }
237
238 spin_unlock_irqrestore(&head->lock, flags);
239 return 0;
240}
241
242/*
243 * Unset expiration time for current cpu.
244 *
245 * Returns 1 if this timer was pending.
246 */
247static int uv_rtc_unset_timer(int cpu)
248{
249 int pnode = uv_cpu_to_pnode(cpu);
250 int bid = uv_cpu_to_blade_id(cpu);
251 struct uv_rtc_timer_head *head = blade_info[bid];
252 int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
253 u64 *t = &head->cpu[bcpu].expires;
254 unsigned long flags;
255 int rc = 0;
256
257 spin_lock_irqsave(&head->lock, flags);
258
259 if (head->next_cpu == bcpu && uv_read_rtc() >= *t)
260 rc = 1;
261
262 *t = ULLONG_MAX;
263
 264 /* Was the hardware set up for this timer? */
265 if (head->next_cpu == bcpu)
266 uv_rtc_find_next_timer(head, pnode);
267
268 spin_unlock_irqrestore(&head->lock, flags);
269
270 return rc;
271}
272
273
274/*
275 * Kernel interface routines.
276 */
277
278/*
279 * Read the RTC.
280 */
281static cycle_t uv_read_rtc(void)
282{
283 return (cycle_t)uv_read_local_mmr(UVH_RTC);
284}
285
286/*
287 * Program the next event, relative to now
288 */
289static int uv_rtc_next_event(unsigned long delta,
290 struct clock_event_device *ced)
291{
292 int ced_cpu = cpumask_first(ced->cpumask);
293
294 return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc());
295}
296
297/*
 298 * Set up the RTC timer in oneshot mode
299 */
300static void uv_rtc_timer_setup(enum clock_event_mode mode,
301 struct clock_event_device *evt)
302{
303 int ced_cpu = cpumask_first(evt->cpumask);
304
305 switch (mode) {
306 case CLOCK_EVT_MODE_PERIODIC:
307 case CLOCK_EVT_MODE_ONESHOT:
308 case CLOCK_EVT_MODE_RESUME:
309 /* Nothing to do here yet */
310 break;
311 case CLOCK_EVT_MODE_UNUSED:
312 case CLOCK_EVT_MODE_SHUTDOWN:
313 uv_rtc_unset_timer(ced_cpu);
314 break;
315 }
316}
317
318static void uv_rtc_interrupt(void)
319{
320 struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
321 int cpu = smp_processor_id();
322
323 if (!ced || !ced->event_handler)
324 return;
325
326 if (uv_rtc_unset_timer(cpu) != 1)
327 return;
328
329 ced->event_handler(ced);
330}
331
332static int __init uv_enable_rtc(char *str)
333{
334 uv_rtc_enable = 1;
335
336 return 1;
337}
338__setup("uvrtc", uv_enable_rtc);
339
340static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
341{
342 struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
343
344 *ced = clock_event_device_uv;
345 ced->cpumask = cpumask_of(smp_processor_id());
346 clockevents_register_device(ced);
347}
348
349static __init int uv_rtc_setup_clock(void)
350{
351 int rc;
352
353 if (!uv_rtc_enable || !is_uv_system() || generic_interrupt_extension)
354 return -ENODEV;
355
356 generic_interrupt_extension = uv_rtc_interrupt;
357
358 clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
359 clocksource_uv.shift);
360
361 rc = clocksource_register(&clocksource_uv);
362 if (rc) {
363 generic_interrupt_extension = NULL;
364 return rc;
365 }
366
367 /* Setup and register clockevents */
368 rc = uv_rtc_allocate_timers();
369 if (rc) {
370 clocksource_unregister(&clocksource_uv);
371 generic_interrupt_extension = NULL;
372 return rc;
373 }
374
375 clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
376 NSEC_PER_SEC, clock_event_device_uv.shift);
377
378 clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
379 sn_rtc_cycles_per_second;
380
381 clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
382 (NSEC_PER_SEC / sn_rtc_cycles_per_second);
383
384 rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
385 if (rc) {
386 clocksource_unregister(&clocksource_uv);
387 generic_interrupt_extension = NULL;
388 uv_rtc_deallocate_timers();
389 }
390
391 return rc;
392}
393arch_initcall(uv_rtc_setup_clock);
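The mult/shift setup above is ordinary fixed-point scaling: mult is roughly (NSEC_PER_SEC << shift) / hz, and a reading converts as ns = (cycles * mult) >> shift. A sketch, assuming a 100 MHz RTC (the real rate comes from sn_rtc_cycles_per_second):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* What clocksource_hz2mult() computes: a scaled reciprocal of the
 * clock rate, so conversion is a multiply and a shift. */
static uint64_t hz2mult(uint64_t hz, int shift)
{
	return (NSEC_PER_SEC << shift) / hz;
}

int main(void)
{
	uint64_t hz = 100000000ULL;   /* assumed 100 MHz RTC */
	int cs_shift = 10;            /* clocksource_uv.shift above */
	uint64_t mult = hz2mult(hz, cs_shift);
	uint64_t cycles = hz;         /* one second's worth of ticks */

	printf("mult=%llu, 1s of cycles -> %llu ns\n",
	       (unsigned long long)mult,
	       (unsigned long long)((cycles * mult) >> cs_shift));
	return 0;
}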
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index d801d06af06..31ffc24eec4 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -24,18 +24,14 @@
24 24
25#include <asm/visws/cobalt.h> 25#include <asm/visws/cobalt.h>
26#include <asm/visws/piix4.h> 26#include <asm/visws/piix4.h>
27#include <asm/arch_hooks.h>
28#include <asm/io_apic.h> 27#include <asm/io_apic.h>
29#include <asm/fixmap.h> 28#include <asm/fixmap.h>
30#include <asm/reboot.h> 29#include <asm/reboot.h>
31#include <asm/setup.h> 30#include <asm/setup.h>
31#include <asm/apic.h>
32#include <asm/e820.h> 32#include <asm/e820.h>
33#include <asm/io.h> 33#include <asm/io.h>
34 34
35#include <mach_ipi.h>
36
37#include "mach_apic.h"
38
39#include <linux/kernel_stat.h> 35#include <linux/kernel_stat.h>
40 36
41#include <asm/i8259.h> 37#include <asm/i8259.h>
@@ -49,8 +45,6 @@
49 45
50extern int no_broadcast; 46extern int no_broadcast;
51 47
52#include <asm/apic.h>
53
54char visws_board_type = -1; 48char visws_board_type = -1;
55char visws_board_rev = -1; 49char visws_board_rev = -1;
56 50
@@ -200,7 +194,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
200 return; 194 return;
201 } 195 }
202 196
203 apic_cpus = apicid_to_cpu_present(m->apicid); 197 apic_cpus = apic->apicid_to_cpu_present(m->apicid);
204 physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); 198 physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus);
205 /* 199 /*
206 * Validate version 200 * Validate version
@@ -584,7 +578,7 @@ static struct irq_chip piix4_virtual_irq_type = {
584static irqreturn_t piix4_master_intr(int irq, void *dev_id) 578static irqreturn_t piix4_master_intr(int irq, void *dev_id)
585{ 579{
586 int realirq; 580 int realirq;
587 irq_desc_t *desc; 581 struct irq_desc *desc;
588 unsigned long flags; 582 unsigned long flags;
589 583
590 spin_lock_irqsave(&i8259A_lock, flags); 584 spin_lock_irqsave(&i8259A_lock, flags);
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 4eeb5cf9720..d7ac84e7fc1 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -158,7 +158,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
158 ret = KVM86->regs32; 158 ret = KVM86->regs32;
159 159
160 ret->fs = current->thread.saved_fs; 160 ret->fs = current->thread.saved_fs;
161 loadsegment(gs, current->thread.saved_gs); 161 set_user_gs(ret, current->thread.saved_gs);
162 162
163 return ret; 163 return ret;
164} 164}
@@ -197,9 +197,9 @@ out:
197static int do_vm86_irq_handling(int subfunction, int irqnumber); 197static int do_vm86_irq_handling(int subfunction, int irqnumber);
198static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk); 198static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
199 199
200asmlinkage int sys_vm86old(struct pt_regs regs) 200int sys_vm86old(struct pt_regs *regs)
201{ 201{
202 struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx; 202 struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs->bx;
203 struct kernel_vm86_struct info; /* declare this _on top_, 203 struct kernel_vm86_struct info; /* declare this _on top_,
204 * this avoids wasting of stack space. 204 * this avoids wasting of stack space.
205 * This remains on the stack until we 205 * This remains on the stack until we
@@ -218,7 +218,7 @@ asmlinkage int sys_vm86old(struct pt_regs regs)
218 if (tmp) 218 if (tmp)
219 goto out; 219 goto out;
220 memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus); 220 memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
221 info.regs32 = &regs; 221 info.regs32 = regs;
222 tsk->thread.vm86_info = v86; 222 tsk->thread.vm86_info = v86;
223 do_sys_vm86(&info, tsk); 223 do_sys_vm86(&info, tsk);
224 ret = 0; /* we never return here */ 224 ret = 0; /* we never return here */
@@ -227,7 +227,7 @@ out:
227} 227}
228 228
229 229
230asmlinkage int sys_vm86(struct pt_regs regs) 230int sys_vm86(struct pt_regs *regs)
231{ 231{
232 struct kernel_vm86_struct info; /* declare this _on top_, 232 struct kernel_vm86_struct info; /* declare this _on top_,
233 * this avoids wasting of stack space. 233 * this avoids wasting of stack space.
@@ -239,12 +239,12 @@ asmlinkage int sys_vm86(struct pt_regs regs)
239 struct vm86plus_struct __user *v86; 239 struct vm86plus_struct __user *v86;
240 240
241 tsk = current; 241 tsk = current;
242 switch (regs.bx) { 242 switch (regs->bx) {
243 case VM86_REQUEST_IRQ: 243 case VM86_REQUEST_IRQ:
244 case VM86_FREE_IRQ: 244 case VM86_FREE_IRQ:
245 case VM86_GET_IRQ_BITS: 245 case VM86_GET_IRQ_BITS:
246 case VM86_GET_AND_RESET_IRQ: 246 case VM86_GET_AND_RESET_IRQ:
247 ret = do_vm86_irq_handling(regs.bx, (int)regs.cx); 247 ret = do_vm86_irq_handling(regs->bx, (int)regs->cx);
248 goto out; 248 goto out;
249 case VM86_PLUS_INSTALL_CHECK: 249 case VM86_PLUS_INSTALL_CHECK:
250 /* 250 /*
@@ -261,14 +261,14 @@ asmlinkage int sys_vm86(struct pt_regs regs)
261 ret = -EPERM; 261 ret = -EPERM;
262 if (tsk->thread.saved_sp0) 262 if (tsk->thread.saved_sp0)
263 goto out; 263 goto out;
264 v86 = (struct vm86plus_struct __user *)regs.cx; 264 v86 = (struct vm86plus_struct __user *)regs->cx;
265 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, 265 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
266 offsetof(struct kernel_vm86_struct, regs32) - 266 offsetof(struct kernel_vm86_struct, regs32) -
267 sizeof(info.regs)); 267 sizeof(info.regs));
268 ret = -EFAULT; 268 ret = -EFAULT;
269 if (tmp) 269 if (tmp)
270 goto out; 270 goto out;
271 info.regs32 = &regs; 271 info.regs32 = regs;
272 info.vm86plus.is_vm86pus = 1; 272 info.vm86plus.is_vm86pus = 1;
273 tsk->thread.vm86_info = (struct vm86_struct __user *)v86; 273 tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
274 do_sys_vm86(&info, tsk); 274 do_sys_vm86(&info, tsk);
@@ -323,7 +323,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
323 info->regs32->ax = 0; 323 info->regs32->ax = 0;
324 tsk->thread.saved_sp0 = tsk->thread.sp0; 324 tsk->thread.saved_sp0 = tsk->thread.sp0;
325 tsk->thread.saved_fs = info->regs32->fs; 325 tsk->thread.saved_fs = info->regs32->fs;
326 savesegment(gs, tsk->thread.saved_gs); 326 tsk->thread.saved_gs = get_user_gs(info->regs32);
327 327
328 tss = &per_cpu(init_tss, get_cpu()); 328 tss = &per_cpu(init_tss, get_cpu());
329 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; 329 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index bef58b4982d..2cc4a90e2cb 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -680,10 +680,11 @@ static inline int __init activate_vmi(void)
680 para_fill(pv_mmu_ops.write_cr2, SetCR2); 680 para_fill(pv_mmu_ops.write_cr2, SetCR2);
681 para_fill(pv_mmu_ops.write_cr3, SetCR3); 681 para_fill(pv_mmu_ops.write_cr3, SetCR3);
682 para_fill(pv_cpu_ops.write_cr4, SetCR4); 682 para_fill(pv_cpu_ops.write_cr4, SetCR4);
683 para_fill(pv_irq_ops.save_fl, GetInterruptMask); 683
684 para_fill(pv_irq_ops.restore_fl, SetInterruptMask); 684 para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
685 para_fill(pv_irq_ops.irq_disable, DisableInterrupts); 685 para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
686 para_fill(pv_irq_ops.irq_enable, EnableInterrupts); 686 para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
687 para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);
687 688
688 para_fill(pv_cpu_ops.wbinvd, WBINVD); 689 para_fill(pv_cpu_ops.wbinvd, WBINVD);
689 para_fill(pv_cpu_ops.read_tsc, RDTSC); 690 para_fill(pv_cpu_ops.read_tsc, RDTSC);
@@ -797,8 +798,8 @@ static inline int __init activate_vmi(void)
797#endif 798#endif
798 799
799#ifdef CONFIG_X86_LOCAL_APIC 800#ifdef CONFIG_X86_LOCAL_APIC
800 para_fill(apic_ops->read, APICRead); 801 para_fill(apic->read, APICRead);
801 para_fill(apic_ops->write, APICWrite); 802 para_fill(apic->write, APICWrite);
802#endif 803#endif
803 804
804 /* 805 /*
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index e5b088fffa4..33a788d5879 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -28,7 +28,6 @@
28 28
29#include <asm/vmi.h> 29#include <asm/vmi.h>
30#include <asm/vmi_time.h> 30#include <asm/vmi_time.h>
31#include <asm/arch_hooks.h>
32#include <asm/apicdef.h> 31#include <asm/apicdef.h>
33#include <asm/apic.h> 32#include <asm/apic.h>
34#include <asm/timer.h> 33#include <asm/timer.h>
@@ -256,7 +255,7 @@ void __devinit vmi_time_bsp_init(void)
256 */ 255 */
257 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); 256 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
258 local_irq_disable(); 257 local_irq_disable();
259#ifdef CONFIG_X86_SMP 258#ifdef CONFIG_SMP
260 /* 259 /*
261 * XXX handle_percpu_irq only defined for SMP; we need to switch over 260 * XXX handle_percpu_irq only defined for SMP; we need to switch over
262 * to using it, since this is a local interrupt, which each CPU must 261 * to using it, since this is a local interrupt, which each CPU must
@@ -288,8 +287,7 @@ static struct clocksource clocksource_vmi;
288static cycle_t read_real_cycles(void) 287static cycle_t read_real_cycles(void)
289{ 288{
290 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); 289 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
291 return ret >= clocksource_vmi.cycle_last ? 290 return max(ret, clocksource_vmi.cycle_last);
292 ret : clocksource_vmi.cycle_last;
293} 291}
294 292
295static struct clocksource clocksource_vmi = { 293static struct clocksource clocksource_vmi = {
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index 82c67559dde..0d860963f26 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -12,7 +12,7 @@
12 12
13#include <asm-generic/vmlinux.lds.h> 13#include <asm-generic/vmlinux.lds.h>
14#include <asm/thread_info.h> 14#include <asm/thread_info.h>
15#include <asm/page.h> 15#include <asm/page_types.h>
16#include <asm/cache.h> 16#include <asm/cache.h>
17#include <asm/boot.h> 17#include <asm/boot.h>
18 18
@@ -178,14 +178,7 @@ SECTIONS
178 __initramfs_end = .; 178 __initramfs_end = .;
179 } 179 }
180#endif 180#endif
181 . = ALIGN(PAGE_SIZE); 181 PERCPU(PAGE_SIZE)
182 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
183 __per_cpu_start = .;
184 *(.data.percpu.page_aligned)
185 *(.data.percpu)
186 *(.data.percpu.shared_aligned)
187 __per_cpu_end = .;
188 }
189 . = ALIGN(PAGE_SIZE); 182 . = ALIGN(PAGE_SIZE);
190 /* freed after init ends here */ 183 /* freed after init ends here */
191 184
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 1a614c0e6be..5bf54e40c6e 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -5,7 +5,8 @@
5#define LOAD_OFFSET __START_KERNEL_map 5#define LOAD_OFFSET __START_KERNEL_map
6 6
7#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
8#include <asm/page.h> 8#include <asm/asm-offsets.h>
9#include <asm/page_types.h>
9 10
10#undef i386 /* in case the preprocessor is a 32bit one */ 11#undef i386 /* in case the preprocessor is a 32bit one */
11 12
@@ -13,12 +14,15 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
13OUTPUT_ARCH(i386:x86-64) 14OUTPUT_ARCH(i386:x86-64)
14ENTRY(phys_startup_64) 15ENTRY(phys_startup_64)
15jiffies_64 = jiffies; 16jiffies_64 = jiffies;
16_proxy_pda = 1;
17PHDRS { 17PHDRS {
18 text PT_LOAD FLAGS(5); /* R_E */ 18 text PT_LOAD FLAGS(5); /* R_E */
19 data PT_LOAD FLAGS(7); /* RWE */ 19 data PT_LOAD FLAGS(7); /* RWE */
20 user PT_LOAD FLAGS(7); /* RWE */ 20 user PT_LOAD FLAGS(7); /* RWE */
21 data.init PT_LOAD FLAGS(7); /* RWE */ 21 data.init PT_LOAD FLAGS(7); /* RWE */
22#ifdef CONFIG_SMP
23 percpu PT_LOAD FLAGS(7); /* RWE */
24#endif
25 data.init2 PT_LOAD FLAGS(7); /* RWE */
22 note PT_NOTE FLAGS(0); /* ___ */ 26 note PT_NOTE FLAGS(0); /* ___ */
23} 27}
24SECTIONS 28SECTIONS
@@ -208,14 +212,28 @@ SECTIONS
208 __initramfs_end = .; 212 __initramfs_end = .;
209#endif 213#endif
210 214
215#ifdef CONFIG_SMP
216 /*
217 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
218 * output PHDR, so the next output section - __data_nosave - should
219 * start another section data.init2. Also, pda should be at the head of
220 * percpu area. Preallocate it and define the percpu offset symbol
221 * so that it can be accessed as a percpu variable.
222 */
223 . = ALIGN(PAGE_SIZE);
224 PERCPU_VADDR(0, :percpu)
225#else
211 PERCPU(PAGE_SIZE) 226 PERCPU(PAGE_SIZE)
227#endif
212 228
213 . = ALIGN(PAGE_SIZE); 229 . = ALIGN(PAGE_SIZE);
214 __init_end = .; 230 __init_end = .;
215 231
216 . = ALIGN(PAGE_SIZE); 232 . = ALIGN(PAGE_SIZE);
217 __nosave_begin = .; 233 __nosave_begin = .;
218 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) } 234 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
235 *(.data.nosave)
236 } :data.init2 /* use another section data.init2, see PERCPU_VADDR() above */
219 . = ALIGN(PAGE_SIZE); 237 . = ALIGN(PAGE_SIZE);
220 __nosave_end = .; 238 __nosave_end = .;
221 239
@@ -239,8 +257,28 @@ SECTIONS
239 DWARF_DEBUG 257 DWARF_DEBUG
240} 258}
241 259
260 /*
261 * Per-cpu symbols which need to be offset from __per_cpu_load
262 * for the boot processor.
263 */
264#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
265INIT_PER_CPU(gdt_page);
266INIT_PER_CPU(irq_stack_union);
267
242/* 268/*
243 * Build-time check on the image size: 269 * Build-time check on the image size:
244 */ 270 */
245ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), 271ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
246 "kernel image bigger than KERNEL_IMAGE_SIZE") 272 "kernel image bigger than KERNEL_IMAGE_SIZE")
273
274#ifdef CONFIG_SMP
275ASSERT((per_cpu__irq_stack_union == 0),
276 "irq_stack_union is not at start of per-cpu area");
277#endif
278
279#ifdef CONFIG_KEXEC
280#include <asm/kexec.h>
281
282ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
283 "kexec control code size is too big")
284#endif
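A toy model of the zero-based per-cpu layout the comment above sets up: every symbol is an offset from the start of the per-cpu area, each cpu owns a copy of the whole area, and addressing is base[cpu] + offset. Purely illustrative:

#include <stdio.h>

#define NR_CPUS   2
#define AREA_SIZE 64

static char percpu_area[NR_CPUS][AREA_SIZE];

/* addr = per-cpu base + zero-based symbol offset */
static void *per_cpu_ptr_model(unsigned long offset, int cpu)
{
	return &percpu_area[cpu][offset];
}

int main(void)
{
	unsigned long first_obj_off = 0;   /* first object => offset 0,
					      like irq_stack_union above */
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d copy at %p\n",
		       cpu, per_cpu_ptr_model(first_obj_off, cpu));
	return 0;
}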
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index a688f3bfaec..74de562812c 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -22,7 +22,7 @@
22#include <asm/paravirt.h> 22#include <asm/paravirt.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24 24
25#if defined CONFIG_PCI && defined CONFIG_PARAVIRT 25#ifdef CONFIG_PARAVIRT
26/* 26/*
27 * Interrupt control on vSMPowered systems: 27 * Interrupt control on vSMPowered systems:
28 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' 28 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
@@ -37,6 +37,7 @@ static unsigned long vsmp_save_fl(void)
37 flags &= ~X86_EFLAGS_IF; 37 flags &= ~X86_EFLAGS_IF;
38 return flags; 38 return flags;
39} 39}
40PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
40 41
41static void vsmp_restore_fl(unsigned long flags) 42static void vsmp_restore_fl(unsigned long flags)
42{ 43{
@@ -46,6 +47,7 @@ static void vsmp_restore_fl(unsigned long flags)
46 flags |= X86_EFLAGS_AC; 47 flags |= X86_EFLAGS_AC;
47 native_restore_fl(flags); 48 native_restore_fl(flags);
48} 49}
50PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
49 51
50static void vsmp_irq_disable(void) 52static void vsmp_irq_disable(void)
51{ 53{
@@ -53,6 +55,7 @@ static void vsmp_irq_disable(void)
53 55
54 native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); 56 native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
55} 57}
58PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
56 59
57static void vsmp_irq_enable(void) 60static void vsmp_irq_enable(void)
58{ 61{
@@ -60,6 +63,7 @@ static void vsmp_irq_enable(void)
60 63
61 native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); 64 native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
62} 65}
66PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
63 67
64static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf, 68static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
65 unsigned long addr, unsigned len) 69 unsigned long addr, unsigned len)
@@ -90,10 +94,10 @@ static void __init set_vsmp_pv_ops(void)
90 cap, ctl); 94 cap, ctl);
91 if (cap & ctl & (1 << 4)) { 95 if (cap & ctl & (1 << 4)) {
92 /* Setup irq ops and turn on vSMP IRQ fastpath handling */ 96 /* Setup irq ops and turn on vSMP IRQ fastpath handling */
93 pv_irq_ops.irq_disable = vsmp_irq_disable; 97 pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
94 pv_irq_ops.irq_enable = vsmp_irq_enable; 98 pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
95 pv_irq_ops.save_fl = vsmp_save_fl; 99 pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
96 pv_irq_ops.restore_fl = vsmp_restore_fl; 100 pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
97 pv_init_ops.patch = vsmp_patch; 101 pv_init_ops.patch = vsmp_patch;
98 102
99 ctl &= ~(1 << 4); 103 ctl &= ~(1 << 4);
@@ -110,7 +114,6 @@ static void __init set_vsmp_pv_ops(void)
110} 114}
111#endif 115#endif
112 116
113#ifdef CONFIG_PCI
114static int is_vsmp = -1; 117static int is_vsmp = -1;
115 118
116static void __init detect_vsmp_box(void) 119static void __init detect_vsmp_box(void)
@@ -135,15 +138,6 @@ int is_vsmp_box(void)
135 return 0; 138 return 0;
136 } 139 }
137} 140}
138#else
139static void __init detect_vsmp_box(void)
140{
141}
142int is_vsmp_box(void)
143{
144 return 0;
145}
146#endif
147 141
148void __init vsmp_init(void) 142void __init vsmp_init(void)
149{ 143{
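The PV_CALLEE_SAVE() conversions above wrap each irq op in a one-member struct so call sites can be patched with register-preserving thunks. A conceptual user-space model; names and layout are illustrative, not the kernel's definitions:

#include <stdio.h>

/* One-member wrapper: the type change is what forces every assignment
 * to go through the wrapping macro, as in the diff above. */
struct callee_save {
	unsigned long (*func)(void);
};

#define CALLEE_SAVE(f) ((struct callee_save){ .func = (f) })

static unsigned long model_save_fl(void)
{
	return 0x200;   /* pretend IF is set */
}

struct irq_ops {
	struct callee_save save_fl;
};

int main(void)
{
	struct irq_ops ops = { .save_fl = CALLEE_SAVE(model_save_fl) };

	printf("flags = %#lx\n", ops.save_fl.func());
	return 0;
}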
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 695e426aa35..3909e3ba5ce 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -58,5 +58,3 @@ EXPORT_SYMBOL(__memcpy);
58EXPORT_SYMBOL(empty_zero_page); 58EXPORT_SYMBOL(empty_zero_page);
59EXPORT_SYMBOL(init_level4_pgt); 59EXPORT_SYMBOL(init_level4_pgt);
60EXPORT_SYMBOL(load_gs_index); 60EXPORT_SYMBOL(load_gs_index);
61
62EXPORT_SYMBOL(_proxy_pda);