author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-19 18:06:00 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-19 18:06:00 -0400
commit	60812a4a99b796d894d2522dc63cb0fafc3be25e (patch)
tree	bbf3a441b71e3b9b670d91652094114852272db8 /arch
parent	b04cde34cf1d006dfaf8523640f3a18bbb15ebaa (diff)
parent	92cb7612aee39642d109b8d935ad265e602c0563 (diff)
Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86
* ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86: (33 commits)
  x86: convert cpuinfo_x86 array to a per_cpu array
  x86: introduce frame_pointer() and stack_pointer()
  x86 & generic: change to __builtin_prefetch()
  i386: do not BUG_ON() when MSR is unknown
  x86: acpi use cpu_physical_id
  x86: convert cpu_llc_id to be a per cpu variable
  x86: convert cpu_to_apicid to be a per cpu variable
  i386: introduce "used_vectors" bitmap which can be used to reserve vectors.
  x86: use raw locks during oopses
  x86: honor _PAGE_PSE bit on page walks
  i386: do cpuid_device_create() in CPU_UP_PREPARE instead of CPU_ONLINE.
  x86: implement missing x86_64 function smp_call_function_mask()
  x86: use descriptor's functions instead of inline assembly
  i386: consolidate show_regs and show_registers for i386
  i386: make callgraph use dump_trace() on i386/x86_64
  x86: enable iommu_merge by default
  i386: i386 add AMD64 Barcelona PMU MSR definitions to msr.h
  x86: Unify i386 and x86-64 early quirks
  x86: enable HPET on ICH3 and ICH4
  x86: force enable HPET on VT8235/8237 chipsets
  ...

Manually fix trivial conflict with task pid container helper changes in
arch/x86/kernel/process_32.c
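The headline change in this merge is the conversion of the NR_CPUS-sized cpuinfo_x86 array into a per-CPU variable, which is why so many hunks below rewrite cpu_data[i] and cpu_data + i as cpu_data(i). A minimal sketch of the pattern, assuming the accessor is backed by a per-CPU variable named cpu_info as the series suggests (the UP fallback mentioned in the comment is illustrative):

	/* SMP: one struct per CPU, placed in each CPU's per-CPU area. */
	DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
	#define cpu_data(cpu)	per_cpu(cpu_info, cpu)

	/* Callers change from pointer arithmetic on a global array ... */
	struct cpuinfo_x86 *c_old = cpu_data + cpu;	/* old style */
	/* ... to the accessor, which also works on UP builds where
	 * cpu_data(cpu) can simply expand to boot_cpu_data: */
	struct cpuinfo_x86 *c_new = &cpu_data(cpu);	/* new style */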
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/Kconfig | 2
-rw-r--r--  arch/i386/Makefile | 6
-rw-r--r--  arch/x86/ia32/ia32_binfmt.c | 124
-rw-r--r--  arch/x86/kernel/Makefile_32 | 3
-rw-r--r--  arch/x86/kernel/Makefile_64 | 4
-rw-r--r--  arch/x86/kernel/acpi/Makefile_32 | 3
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 2
-rw-r--r--  arch/x86/kernel/acpi/cstate.c | 4
-rw-r--r--  arch/x86/kernel/acpi/earlyquirk_32.c | 84
-rw-r--r--  arch/x86/kernel/acpi/processor.c | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig_32 (renamed from arch/x86/kernel/cpu/cpufreq/Kconfig) | 0
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig_64 (renamed from arch/x86/kernel/cpufreq/Kconfig) | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/e_powersaver.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/elanfreq.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longrun.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k6.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/sc520_freq.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | 2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 16
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 11
-rw-r--r--  arch/x86/kernel/cpuid.c | 34
-rw-r--r--  arch/x86/kernel/early-quirks.c (renamed from arch/x86/kernel/early-quirks_64.c) | 19
-rw-r--r--  arch/x86/kernel/genapic_64.c | 15
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c | 2
-rw-r--r--  arch/x86/kernel/head64.c | 2
-rw-r--r--  arch/x86/kernel/hpet.c | 3
-rw-r--r--  arch/x86/kernel/i8259_32.c | 3
-rw-r--r--  arch/x86/kernel/init_task.c (renamed from arch/x86/kernel/init_task_32.c) | 11
-rw-r--r--  arch/x86/kernel/init_task_64.c | 54
-rw-r--r--  arch/x86/kernel/io_apic_32.c | 13
-rw-r--r--  arch/x86/kernel/mce_64.c | 3
-rw-r--r--  arch/x86/kernel/mce_amd_64.c | 4
-rw-r--r--  arch/x86/kernel/microcode.c | 6
-rw-r--r--  arch/x86/kernel/mpparse_64.c | 17
-rw-r--r--  arch/x86/kernel/msr.c | 2
-rw-r--r--  arch/x86/kernel/pci-dma_64.c | 2
-rw-r--r--  arch/x86/kernel/process_32.c | 56
-rw-r--r--  arch/x86/kernel/quirks.c | 112
-rw-r--r--  arch/x86/kernel/reboot_64.c | 3
-rw-r--r--  arch/x86/kernel/reboot_fixups_32.c | 8
-rw-r--r--  arch/x86/kernel/setup64.c | 10
-rw-r--r--  arch/x86/kernel/setup_32.c | 4
-rw-r--r--  arch/x86/kernel/setup_64.c | 25
-rw-r--r--  arch/x86/kernel/smp_32.c | 4
-rw-r--r--  arch/x86/kernel/smp_64.c | 119
-rw-r--r--  arch/x86/kernel/smpboot_32.c | 81
-rw-r--r--  arch/x86/kernel/smpboot_64.c | 74
-rw-r--r--  arch/x86/kernel/suspend_64.c | 11
-rw-r--r--  arch/x86/kernel/traps_32.c | 54
-rw-r--r--  arch/x86/kernel/traps_64.c | 16
-rw-r--r--  arch/x86/kernel/tsc_32.c | 8
-rw-r--r--  arch/x86/kernel/tsc_64.c | 4
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 4
-rw-r--r--  arch/x86/lib/delay_32.c | 2
-rw-r--r--  arch/x86/lib/delay_64.c | 3
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 12
-rw-r--r--  arch/x86/mm/fault_32.c | 3
-rw-r--r--  arch/x86/mm/fault_64.c | 16
-rw-r--r--  arch/x86/mm/numa_64.c | 2
-rw-r--r--  arch/x86/oprofile/backtrace.c | 110
-rw-r--r--  arch/x86_64/.gitignore | 1
-rw-r--r--  arch/x86_64/Kconfig | 8
-rw-r--r--  arch/x86_64/Makefile | 6
70 files changed, 630 insertions, 635 deletions
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index d0a4ea1ba14d..d1bedbf9deb8 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -1080,7 +1080,7 @@ config APM_REAL_MODE_POWER_OFF
 
 endif # APM
 
-source "arch/x86/kernel/cpu/cpufreq/Kconfig"
+source "arch/x86/kernel/cpu/cpufreq/Kconfig_32"
 
 source "drivers/cpuidle/Kconfig"
 
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index f036d2dee3de..b88e47ca3032 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -102,7 +102,7 @@ core-$(CONFIG_XEN) += arch/x86/xen/
 # default subarch .h files
 mflags-y += -Iinclude/asm-x86/mach-default
 
-head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task_32.o
+head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task.o
 
 libs-y += arch/x86/lib/
 core-y += arch/x86/kernel/ \
@@ -131,9 +131,9 @@ all: bzImage
 zImage zlilo zdisk: KBUILD_IMAGE := arch/x86/boot/zImage
 
 zImage bzImage: vmlinux
-	$(Q)mkdir -p $(objtree)/arch/i386/boot
-	$(Q)ln -fsn $(objtree)/arch/x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage
 	$(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
+	$(Q)mkdir -p $(objtree)/arch/i386/boot
+	$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage
 
 compressed: zImage
 
diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c
index 5027650eb273..55822d2cf053 100644
--- a/arch/x86/ia32/ia32_binfmt.c
+++ b/arch/x86/ia32/ia32_binfmt.c
@@ -5,10 +5,6 @@
  * This tricks binfmt_elf.c into loading 32bit binaries using lots
  * of ugly preprocessor tricks. Talk about very very poor man's inheritance.
  */
-#define __ASM_X86_64_ELF_H 1
-
-#undef ELF_CLASS
-#define ELF_CLASS ELFCLASS32
 
 #include <linux/types.h>
 #include <linux/stddef.h>
@@ -19,6 +15,7 @@
 #include <linux/binfmts.h>
 #include <linux/mm.h>
 #include <linux/security.h>
+#include <linux/elfcore-compat.h>
 
 #include <asm/segment.h>
 #include <asm/ptrace.h>
@@ -31,6 +28,20 @@
 #include <asm/ia32.h>
 #include <asm/vsyscall32.h>
 
+#undef ELF_ARCH
+#undef ELF_CLASS
+#define ELF_CLASS ELFCLASS32
+#define ELF_ARCH EM_386
+
+#undef elfhdr
+#undef elf_phdr
+#undef elf_note
+#undef elf_addr_t
+#define elfhdr elf32_hdr
+#define elf_phdr elf32_phdr
+#define elf_note elf32_note
+#define elf_addr_t Elf32_Off
+
 #define ELF_NAME "elf/i386"
 
 #define AT_SYSINFO 32
@@ -48,74 +59,20 @@ int sysctl_vsyscall32 = 1;
 } while(0)
 
 struct file;
-struct elf_phdr;
 
 #define IA32_EMULATOR 1
 
-#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
-
-#undef ELF_ARCH
-#define ELF_ARCH EM_386
-
-#define ELF_DATA ELFDATA2LSB
+#undef ELF_ET_DYN_BASE
 
-#define USE_ELF_CORE_DUMP 1
-
-/* Override elfcore.h */
-#define _LINUX_ELFCORE_H 1
-typedef unsigned int elf_greg_t;
-
-#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-struct elf_siginfo
-{
-	int	si_signo;	/* signal number */
-	int	si_code;	/* extra code */
-	int	si_errno;	/* errno */
-};
+#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
 
 #define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
 
-struct elf_prstatus
-{
-	struct elf_siginfo pr_info;	/* Info associated with signal */
-	short	pr_cursig;		/* Current signal */
-	unsigned int pr_sigpend;	/* Set of pending signals */
-	unsigned int pr_sighold;	/* Set of held signals */
-	pid_t	pr_pid;
-	pid_t	pr_ppid;
-	pid_t	pr_pgrp;
-	pid_t	pr_sid;
-	struct compat_timeval pr_utime;		/* User time */
-	struct compat_timeval pr_stime;		/* System time */
-	struct compat_timeval pr_cutime;	/* Cumulative user time */
-	struct compat_timeval pr_cstime;	/* Cumulative system time */
-	elf_gregset_t pr_reg;	/* GP registers */
-	int pr_fpvalid;		/* True if math co-processor being used. */
-};
-
-#define ELF_PRARGSZ	(80)	/* Number of chars for args */
-
-struct elf_prpsinfo
-{
-	char	pr_state;	/* numeric process state */
-	char	pr_sname;	/* char for pr_state */
-	char	pr_zomb;	/* zombie */
-	char	pr_nice;	/* nice val */
-	unsigned int pr_flag;	/* flags */
-	__u16	pr_uid;
-	__u16	pr_gid;
-	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
-	/* Lots missing */
-	char	pr_fname[16];	/* filename of executable */
-	char	pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
-};
-
 #define _GET_SEG(x) \
 	({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; })
 
 /* Assumes current==process to be dumped */
+#undef ELF_CORE_COPY_REGS
 #define ELF_CORE_COPY_REGS(pr_reg, regs) \
 	pr_reg[0] = regs->rbx; \
 	pr_reg[1] = regs->rcx; \
@@ -135,36 +92,41 @@ struct elf_prpsinfo
 	pr_reg[15] = regs->rsp; \
 	pr_reg[16] = regs->ss;
 
-#define user user32
+
+#define elf_prstatus compat_elf_prstatus
+#define elf_prpsinfo compat_elf_prpsinfo
+#define elf_fpregset_t struct user_i387_ia32_struct
+#define elf_fpxregset_t struct user32_fxsr_struct
+#define user user32
 
 #undef elf_read_implies_exec
 #define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
-//#include <asm/ia32.h>
-#include <linux/elf.h>
-
-typedef struct user_i387_ia32_struct elf_fpregset_t;
-typedef struct user32_fxsr_struct elf_fpxregset_t;
-
 
-static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+#define elf_core_copy_regs elf32_core_copy_regs
+static inline void elf32_core_copy_regs(compat_elf_gregset_t *elfregs,
+					struct pt_regs *regs)
 {
-	ELF_CORE_COPY_REGS((*elfregs), regs)
+	ELF_CORE_COPY_REGS((&elfregs->ebx), regs)
 }
 
-static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
+#define elf_core_copy_task_regs elf32_core_copy_task_regs
+static inline int elf32_core_copy_task_regs(struct task_struct *t,
+					    compat_elf_gregset_t* elfregs)
 {
 	struct pt_regs *pp = task_pt_regs(t);
-	ELF_CORE_COPY_REGS((*elfregs), pp);
+	ELF_CORE_COPY_REGS((&elfregs->ebx), pp);
 	/* fix wrong segments */
-	(*elfregs)[7] = t->thread.ds;
-	(*elfregs)[9] = t->thread.fsindex;
-	(*elfregs)[10] = t->thread.gsindex;
-	(*elfregs)[8] = t->thread.es;
+	elfregs->ds = t->thread.ds;
+	elfregs->fs = t->thread.fsindex;
+	elfregs->gs = t->thread.gsindex;
+	elfregs->es = t->thread.es;
 	return 1;
 }
 
+#define elf_core_copy_task_fpregs elf32_core_copy_task_fpregs
 static inline int
-elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu)
+elf32_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs,
+			    elf_fpregset_t *fpu)
 {
 	struct _fpstate_ia32 *fpstate = (void*)fpu;
 	mm_segment_t oldfs = get_fs();
@@ -186,8 +148,9 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
 
 #define ELF_CORE_COPY_XFPREGS 1
 #define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
+#define elf_core_copy_task_xfpregs elf32_core_copy_task_xfpregs
 static inline int
-elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
+elf32_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
 {
 	struct pt_regs *regs = task_pt_regs(t);
 	if (!tsk_used_math(t))
@@ -206,6 +169,10 @@ elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
 
 extern int force_personality32;
 
+#undef ELF_EXEC_PAGESIZE
+#undef ELF_HWCAP
+#undef ELF_PLATFORM
+#undef SET_PERSONALITY
 #define ELF_EXEC_PAGESIZE PAGE_SIZE
 #define ELF_HWCAP (boot_cpu_data.x86_capability[0])
 #define ELF_PLATFORM ("i686")
@@ -231,6 +198,7 @@ do { \
 
 #define load_elf_binary load_elf32_binary
 
+#undef ELF_PLAT_INIT
 #define ELF_PLAT_INIT(r, load_addr) elf32_init(r)
 
 #undef start_thread
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index a3fa11f8f460..ccea590bbb92 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-extra-y := head_32.o init_task_32.o vmlinux.lds
+extra-y := head_32.o init_task.o vmlinux.lds
 
 obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
 	ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \
@@ -17,6 +17,7 @@ obj-$(CONFIG_MCA) += mca_32.o
 obj-$(CONFIG_X86_MSR) += msr.o
 obj-$(CONFIG_X86_CPUID) += cpuid.o
 obj-$(CONFIG_MICROCODE) += microcode.o
+obj-$(CONFIG_PCI) += early-quirks.o
 obj-$(CONFIG_APM) += apm_32.o
 obj-$(CONFIG_X86_SMP) += smp_32.o smpboot_32.o tsc_sync.o
 obj-$(CONFIG_SMP) += smpcommon_32.o
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index 43da66213a47..dec06e769281 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-extra-y := head_64.o head64.o init_task_64.o vmlinux.lds
+extra-y := head_64.o head64.o init_task.o vmlinux.lds
 EXTRA_AFLAGS := -traditional
 obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
 	ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
@@ -39,7 +39,7 @@ obj-$(CONFIG_K8_NB) += k8.o
 obj-$(CONFIG_AUDIT) += audit_64.o
 
 obj-$(CONFIG_MODULES) += module_64.o
-obj-$(CONFIG_PCI) += early-quirks_64.o
+obj-$(CONFIG_PCI) += early-quirks.o
 
 obj-y += topology.o
 obj-y += intel_cacheinfo.o
diff --git a/arch/x86/kernel/acpi/Makefile_32 b/arch/x86/kernel/acpi/Makefile_32
index a4852a2e9190..045dd54b33e0 100644
--- a/arch/x86/kernel/acpi/Makefile_32
+++ b/arch/x86/kernel/acpi/Makefile_32
@@ -1,7 +1,4 @@
 obj-$(CONFIG_ACPI) += boot.o
-ifneq ($(CONFIG_PCI),)
-obj-$(CONFIG_X86_IO_APIC) += earlyquirk_32.o
-endif
 obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index afd2afe9102d..f28b2e251b1d 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -555,7 +555,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 
 int acpi_unmap_lsapic(int cpu)
 {
-	x86_cpu_to_apicid[cpu] = -1;
+	per_cpu(x86_cpu_to_apicid, cpu) = -1;
 	cpu_clear(cpu, cpu_present_map);
 	num_processors--;
 
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 2d39f55d29a8..10b67170b133 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -29,7 +29,7 @@
 void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
 					unsigned int cpu)
 {
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	flags->bm_check = 0;
 	if (num_online_cpus() == 1)
@@ -72,7 +72,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
 {
 	struct cstate_entry *percpu_entry;
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	cpumask_t saved_mask;
 	int retval;
diff --git a/arch/x86/kernel/acpi/earlyquirk_32.c b/arch/x86/kernel/acpi/earlyquirk_32.c
deleted file mode 100644
index 23f78efc577d..000000000000
--- a/arch/x86/kernel/acpi/earlyquirk_32.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Do early PCI probing for bug detection when the main PCI subsystem is
- * not up yet.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/acpi.h>
-
-#include <asm/pci-direct.h>
-#include <asm/acpi.h>
-#include <asm/apic.h>
-
-#ifdef CONFIG_ACPI
-
-static int __init nvidia_hpet_check(struct acpi_table_header *header)
-{
-	return 0;
-}
-#endif
-
-static int __init check_bridge(int vendor, int device)
-{
-#ifdef CONFIG_ACPI
-	static int warned;
-	/* According to Nvidia all timer overrides are bogus unless HPET
-	   is enabled. */
-	if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
-		if (!warned && acpi_table_parse(ACPI_SIG_HPET,
-						nvidia_hpet_check)) {
-			warned = 1;
-			acpi_skip_timer_override = 1;
-			printk(KERN_INFO "Nvidia board "
-			       "detected. Ignoring ACPI "
-			       "timer override.\n");
-			printk(KERN_INFO "If you got timer trouble "
-			       "try acpi_use_timer_override\n");
-
-		}
-	}
-#endif
-	if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
-		timer_over_8254 = 0;
-		printk(KERN_INFO "ATI board detected. Disabling timer routing "
-		       "over 8254.\n");
-	}
-	return 0;
-}
-
-void __init check_acpi_pci(void)
-{
-	int num, slot, func;
-
-	/* Assume the machine supports type 1. If not it will
-	   always read ffffffff and should not have any side effect.
-	   Actually a few buggy systems can machine check. Allow the user
-	   to disable it by command line option at least -AK */
-	if (!early_pci_allowed())
-		return;
-
-	/* Poor man's PCI discovery */
-	for (num = 0; num < 32; num++) {
-		for (slot = 0; slot < 32; slot++) {
-			for (func = 0; func < 8; func++) {
-				u32 class;
-				u32 vendor;
-				class = read_pci_config(num, slot, func,
-							PCI_CLASS_REVISION);
-				if (class == 0xffffffff)
-					break;
-
-				if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
-					continue;
-
-				vendor = read_pci_config(num, slot, func,
-							 PCI_VENDOR_ID);
-
-				if (check_bridge(vendor & 0xffff, vendor >> 16))
-					return;
-			}
-
-		}
-	}
-}
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index b54fded49834..2ed0a4ce62f0 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -63,7 +63,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
 void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
 {
 	unsigned int cpu = pr->id;
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	pr->pdc = NULL;
 	if (c->x86_vendor == X86_VENDOR_INTEL)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 3bd2688bd443..d6405e0842b5 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -357,14 +357,14 @@ void alternatives_smp_switch(int smp)
 	if (smp) {
 		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
 		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_lock(mod->locks, mod->locks_end,
 					      mod->text, mod->text_end);
 	} else {
 		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
 		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_unlock(mod->locks, mod->locks_end,
 						mod->text, mod->text_end);
@@ -432,7 +432,7 @@ void __init alternative_instructions(void)
 	if (1 == num_possible_cpus()) {
 		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
 		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
 		alternatives_smp_unlock(__smp_locks, __smp_locks_end,
 					_text, _etext);
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig_32
index d8c6f132dc7a..d8c6f132dc7a 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig_32
diff --git a/arch/x86/kernel/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig_64
index a3fd51926cbd..9c9699fdcf52 100644
--- a/arch/x86/kernel/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig_64
@@ -19,7 +19,7 @@ config X86_POWERNOW_K8
 	  To compile this driver as a module, choose M here: the
 	  module will be called powernow-k8.
 
 	  For details, take a look at <file:Documentation/cpu-freq/>.
 
 	  If in doubt, say N.
 
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 2ca43ba32bc0..fea0af0476b9 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -77,7 +77,7 @@ static unsigned int acpi_pstate_strict;
 
 static int check_est_cpu(unsigned int cpuid)
 {
-	struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
+	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
 
 	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
 	    !cpu_has(cpu, X86_FEATURE_EST))
@@ -560,7 +560,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	unsigned int cpu = policy->cpu;
 	struct acpi_cpufreq_data *data;
 	unsigned int result = 0;
-	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
 	struct acpi_processor_performance *perf;
 
 	dprintk("acpi_cpufreq_cpu_init\n");
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
index c11baaf9f2b4..326a4c81f684 100644
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -305,7 +305,7 @@ static struct cpufreq_driver eps_driver = {
 
 static int __init eps_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	/* This driver will work only on Centaur C7 processors with
 	 * Enhanced SpeedStep/PowerSaver registers */
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
index 1e7ae7dafcf6..94619c22f563 100644
--- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
@@ -199,7 +199,7 @@ static int elanfreq_target (struct cpufreq_policy *policy,
 
 static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	unsigned int i;
 	int result;
 
@@ -280,7 +280,7 @@ static struct cpufreq_driver elanfreq_driver = {
 
 static int __init elanfreq_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	/* Test if we have the right hardware */
 	if ((c->x86_vendor != X86_VENDOR_AMD) ||
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 5045f5d583c8..749d00cb2ebd 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -780,7 +780,7 @@ static int longhaul_setup_southbridge(void)
 
 static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	char *cpuname=NULL;
 	int ret;
 	u32 lo, hi;
@@ -959,7 +959,7 @@ static struct cpufreq_driver longhaul_driver = {
 
 static int __init longhaul_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
 		return -ENODEV;
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index b2689514295a..af4a867a097c 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -172,7 +172,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
 	u32 save_lo, save_hi;
 	u32 eax, ebx, ecx, edx;
 	u32 try_hi;
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (!low_freq || !high_freq)
 		return -EINVAL;
@@ -298,7 +298,7 @@ static struct cpufreq_driver longrun_driver = {
  */
 static int __init longrun_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
 	    !cpu_has(c, X86_FEATURE_LONGRUN))
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 793eae854f4f..14791ec55cfd 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -195,7 +195,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 
 static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
 	int cpuid = 0;
 	unsigned int i;
 
@@ -279,7 +279,7 @@ static struct cpufreq_driver p4clockmod_driver = {
 
 static int __init cpufreq_p4_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	int ret;
 
 	/*
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
index 6d0285339317..42405b4e34ed 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
@@ -215,7 +215,7 @@ static struct cpufreq_driver powernow_k6_driver = {
  */
 static int __init powernow_k6_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
 		((c->x86_model != 12) && (c->x86_model != 13)))
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index f3686a5f2308..b5a9863d6cdc 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -114,7 +114,7 @@ static int check_fsb(unsigned int fsbspeed)
 
 static int check_powernow(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	unsigned int maxei, eax, ebx, ecx, edx;
 
 	if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) {
diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
index d9f3e90a7ae0..42da9bd677d6 100644
--- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
+++ b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
@@ -102,7 +102,7 @@ static int sc520_freq_target (struct cpufreq_policy *policy,
 
 static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	int result;
 
 	/* capability check */
@@ -151,7 +151,7 @@ static struct cpufreq_driver sc520_freq_driver = {
 
 static int __init sc520_freq_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	int err;
 
 	/* Test if we have the right hardware */
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 811d47438546..3031f1196192 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -230,7 +230,7 @@ static struct cpu_model models[] =
 
 static int centrino_cpu_init_table(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
 	struct cpu_model *model;
 
 	for(model = models; model->cpu_id != NULL; model++)
@@ -340,7 +340,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
 
 static int centrino_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
 	unsigned freq;
 	unsigned l, h;
 	int ret;
@@ -612,7 +612,7 @@ static struct cpufreq_driver centrino_driver = {
  */
 static int __init centrino_init(void)
 {
-	struct cpuinfo_x86 *cpu = cpu_data;
+	struct cpuinfo_x86 *cpu = &cpu_data(0);
 
 	if (!cpu_has(cpu, X86_FEATURE_EST))
 		return -ENODEV;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index b1acc8ce3167..76c3ab0da468 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -228,7 +228,7 @@ EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency);
 
 unsigned int speedstep_detect_processor (void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	u32 ebx, msr_lo, msr_hi;
 
 	dprintk("x86: %x, model: %x\n", c->x86, c->x86_model);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 297a24116949..9921b01fe199 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -295,7 +295,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
 #ifdef CONFIG_X86_HT
-	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
+	unsigned int cpu = c->cpu_index;
 #endif
 
 	if (c->cpuid_level > 3) {
@@ -417,14 +417,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	if (new_l2) {
 		l2 = new_l2;
 #ifdef CONFIG_X86_HT
-		cpu_llc_id[cpu] = l2_id;
+		per_cpu(cpu_llc_id, cpu) = l2_id;
 #endif
 	}
 
 	if (new_l3) {
 		l3 = new_l3;
 #ifdef CONFIG_X86_HT
-		cpu_llc_id[cpu] = l3_id;
+		per_cpu(cpu_llc_id, cpu) = l3_id;
 #endif
 	}
 
@@ -459,7 +459,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	unsigned long num_threads_sharing;
 	int index_msb, i;
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
@@ -470,8 +470,8 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	index_msb = get_count_order(num_threads_sharing);
 
 	for_each_online_cpu(i) {
-		if (c[i].apicid >> index_msb ==
-		    c[cpu].apicid >> index_msb) {
+		if (cpu_data(i).apicid >> index_msb ==
+		    c->apicid >> index_msb) {
 			cpu_set(i, this_leaf->shared_cpu_map);
 			if (i != cpu && cpuid4_info[i]) {
 				sibling_leaf = CPUID4_INFO_IDX(i, index);
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 54cdbf1a40f1..c02541e6e653 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -120,7 +120,9 @@ int reserve_perfctr_nmi(unsigned int msr)
 	unsigned int counter;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+	/* register not managed by the allocator? */
+	if (counter > NMI_MAX_COUNTER_BITS)
+		return 1;
 
 	if (!test_and_set_bit(counter, perfctr_nmi_owner))
 		return 1;
@@ -132,7 +134,9 @@ void release_perfctr_nmi(unsigned int msr)
 	unsigned int counter;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+	/* register not managed by the allocator? */
+	if (counter > NMI_MAX_COUNTER_BITS)
+		return;
 
 	clear_bit(counter, perfctr_nmi_owner);
 }
@@ -142,7 +146,9 @@ int reserve_evntsel_nmi(unsigned int msr)
 	unsigned int counter;
 
 	counter = nmi_evntsel_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+	/* register not managed by the allocator? */
+	if (counter > NMI_MAX_COUNTER_BITS)
+		return 1;
 
 	if (!test_and_set_bit(counter, evntsel_nmi_owner))
 		return 1;
@@ -154,7 +160,9 @@ void release_evntsel_nmi(unsigned int msr)
 	unsigned int counter;
 
 	counter = nmi_evntsel_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+	/* register not managed by the allocator? */
+	if (counter > NMI_MAX_COUNTER_BITS)
+		return;
 
 	clear_bit(counter, evntsel_nmi_owner);
 }
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 879a0f789b1e..2d42b414b777 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -85,12 +85,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		/* nothing */
 	};
 	struct cpuinfo_x86 *c = v;
-	int i, n = c - cpu_data;
+	int i, n = 0;
 	int fpu_exception;
 
 #ifdef CONFIG_SMP
 	if (!cpu_online(n))
 		return 0;
+	n = c->cpu_index;
 #endif
 	seq_printf(m, "processor\t: %d\n"
 		"vendor_id\t: %s\n"
@@ -175,11 +176,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+	if (*pos == 0)	/* just in case, cpu 0 is not the first */
+		*pos = first_cpu(cpu_possible_map);
+	if ((*pos) < NR_CPUS && cpu_possible(*pos))
+		return &cpu_data(*pos);
+	return NULL;
 }
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	++*pos;
+	*pos = next_cpu(*pos, cpu_possible_map);
 	return c_start(m, pos);
 }
 static void c_stop(struct seq_file *m, void *v)
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 70dcf912d9fb..05c9936a16cc 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -114,7 +114,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 static int cpuid_open(struct inode *inode, struct file *file)
 {
 	unsigned int cpu = iminor(file->f_path.dentry->d_inode);
-	struct cpuinfo_x86 *c = &(cpu_data)[cpu];
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	if (cpu >= NR_CPUS || !cpu_online(cpu))
 		return -ENXIO;	/* No such CPU */
@@ -134,15 +134,18 @@ static const struct file_operations cpuid_fops = {
 	.open = cpuid_open,
 };
 
-static int __cpuinit cpuid_device_create(int i)
+static __cpuinit int cpuid_device_create(int cpu)
 {
-	int err = 0;
 	struct device *dev;
 
-	dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), "cpu%d",i);
-	if (IS_ERR(dev))
-		err = PTR_ERR(dev);
-	return err;
+	dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu),
+			    "cpu%d", cpu);
+	return IS_ERR(dev) ? PTR_ERR(dev) : 0;
+}
+
+static void cpuid_device_destroy(int cpu)
+{
+	device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
 }
 
 static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
@@ -150,18 +153,21 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
 					      void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
+	int err = 0;
 
 	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		cpuid_device_create(cpu);
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		err = cpuid_device_create(cpu);
 		break;
+	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+		cpuid_device_destroy(cpu);
 		break;
 	}
-	return NOTIFY_OK;
+	return err ? NOTIFY_BAD : NOTIFY_OK;
 }
 
 static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
@@ -198,7 +204,7 @@ static int __init cpuid_init(void)
 out_class:
 	i = 0;
 	for_each_online_cpu(i) {
-		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i));
+		cpuid_device_destroy(i);
 	}
 	class_destroy(cpuid_class);
 out_chrdev:
@@ -212,7 +218,7 @@ static void __exit cpuid_exit(void)
 	int cpu = 0;
 
 	for_each_online_cpu(cpu)
-		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+		cpuid_device_destroy(cpu);
 	class_destroy(cpuid_class);
 	unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
 	unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
diff --git a/arch/x86/kernel/early-quirks_64.c b/arch/x86/kernel/early-quirks.c
index 13aa4fd728f3..dc34acbd54aa 100644
--- a/arch/x86/kernel/early-quirks_64.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -13,9 +13,13 @@
 #include <linux/acpi.h>
 #include <linux/pci_ids.h>
 #include <asm/pci-direct.h>
-#include <asm/proto.h>
-#include <asm/iommu.h>
 #include <asm/dma.h>
+#include <asm/io_apic.h>
+#include <asm/apic.h>
+
+#ifdef CONFIG_IOMMU
+#include <asm/iommu.h>
+#endif
 
 static void __init via_bugs(void)
 {
@@ -23,7 +27,8 @@ static void __init via_bugs(void)
 	if ((end_pfn > MAX_DMA32_PFN || force_iommu) &&
 	    !iommu_aperture_allowed) {
 		printk(KERN_INFO
-		       "Looks like a VIA chipset. Disabling IOMMU. Override with iommu=allowed\n");
+		       "Looks like a VIA chipset. Disabling IOMMU."
+		       " Override with iommu=allowed\n");
 		iommu_aperture_disabled = 1;
 	}
 #endif
@@ -40,6 +45,7 @@ static int __init nvidia_hpet_check(struct acpi_table_header *header)
 static void __init nvidia_bugs(void)
 {
 #ifdef CONFIG_ACPI
+#ifdef CONFIG_X86_IO_APIC
 	/*
 	 * All timer overrides on Nvidia are
 	 * wrong unless HPET is enabled.
@@ -59,17 +65,20 @@ static void __init nvidia_bugs(void)
59 "try acpi_use_timer_override\n"); 65 "try acpi_use_timer_override\n");
60 } 66 }
61#endif 67#endif
68#endif
62 /* RED-PEN skip them on mptables too? */ 69 /* RED-PEN skip them on mptables too? */
63 70
64} 71}
65 72
66static void __init ati_bugs(void) 73static void __init ati_bugs(void)
67{ 74{
75#ifdef CONFIG_X86_IO_APIC
68 if (timer_over_8254 == 1) { 76 if (timer_over_8254 == 1) {
69 timer_over_8254 = 0; 77 timer_over_8254 = 0;
70 printk(KERN_INFO 78 printk(KERN_INFO
71 "ATI board detected. Disabling timer routing over 8254.\n"); 79 "ATI board detected. Disabling timer routing over 8254.\n");
72 } 80 }
81#endif
73} 82}
74 83
75struct chipset { 84struct chipset {
@@ -104,7 +113,7 @@ void __init early_quirks(void)
 			if (class == 0xffffffff)
 				break;
 
 			if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
 				continue;
 
 			vendor = read_pci_config(num, slot, func,
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 4ae03e3e8294..ce703e21c912 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -24,10 +24,19 @@
 #include <acpi/acpi_bus.h>
 #endif
 
-/* which logical CPU number maps to which CPU (physical APIC ID) */
-u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly
+/*
+ * which logical CPU number maps to which CPU (physical APIC ID)
+ *
+ * The following static array is used during kernel startup
+ * and the x86_cpu_to_apicid_ptr contains the address of the
+ * array during this time. Is it zeroed when the per_cpu
+ * data area is removed.
+ */
+u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata
 	= { [0 ... NR_CPUS-1] = BAD_APICID };
-EXPORT_SYMBOL(x86_cpu_to_apicid);
+void *x86_cpu_to_apicid_ptr;
+DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 
 struct genapic __read_mostly *genapic = &apic_flat;
 
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 91c7526768ee..07352b74bda6 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -172,7 +172,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
 	 */
 	cpu = first_cpu(cpumask);
 	if ((unsigned)cpu < NR_CPUS)
-		return x86_cpu_to_apicid[cpu];
+		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
 		return BAD_APICID;
 }
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index a7eee0a4751d..6b3469311e42 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -58,7 +58,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
 
 	for (i = 0; i < IDT_ENTRIES; i++)
 		set_intr_gate(i, early_idt_handler);
-	asm volatile("lidt %0" :: "m" (idt_descr));
+	load_idt((const struct desc_ptr *)&idt_descr);
 
 	early_printk("Kernel alive\n");
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index f8367074da0d..22d8f00c80dc 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -69,12 +69,15 @@ static inline void hpet_clear_mapping(void)
  * HPET command line enable / disable
  */
 static int boot_hpet_disable;
+int hpet_force_user;
 
 static int __init hpet_setup(char* str)
 {
 	if (str) {
 		if (!strncmp("disable", str, 7))
 			boot_hpet_disable = 1;
+		if (!strncmp("force", str, 5))
+			hpet_force_user = 1;
 	}
 	return 1;
 }
diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c
index d34a10cc13a7..f634fc715c99 100644
--- a/arch/x86/kernel/i8259_32.c
+++ b/arch/x86/kernel/i8259_32.c
@@ -403,7 +403,8 @@ void __init native_init_IRQ(void)
 		int vector = FIRST_EXTERNAL_VECTOR + i;
 		if (i >= NR_IRQS)
 			break;
-		if (vector != SYSCALL_VECTOR)
+		/* SYSCALL_VECTOR was reserved in trap_init. */
+		if (!test_bit(vector, used_vectors))
 			set_intr_gate(vector, interrupt[i]);
 	}
 
diff --git a/arch/x86/kernel/init_task_32.c b/arch/x86/kernel/init_task.c
index d26fc063a760..468c9c437842 100644
--- a/arch/x86/kernel/init_task_32.c
+++ b/arch/x86/kernel/init_task.c
@@ -15,7 +15,6 @@ static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
-
 EXPORT_SYMBOL(init_mm);
 
 /*
@@ -25,7 +24,7 @@ EXPORT_SYMBOL(init_mm);
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
 union thread_union init_thread_union
 	__attribute__((__section__(".data.init_task"))) =
 		{ INIT_THREAD_INFO(init_task) };
 
@@ -35,12 +34,14 @@ union thread_union init_thread_union
  * All other task structs will be allocated on slabs in fork.c
  */
 struct task_struct init_task = INIT_TASK(init_task);
-
 EXPORT_SYMBOL(init_task);
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's.
- */
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data.cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
diff --git a/arch/x86/kernel/init_task_64.c b/arch/x86/kernel/init_task_64.c
deleted file mode 100644
index 4ff33d4f8551..000000000000
--- a/arch/x86/kernel/init_task_64.c
+++ /dev/null
@@ -1,54 +0,0 @@
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
-/*
- * Initial task structure.
- *
- * We need to make sure that this is 8192-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-		{ INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
-/*
- * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
- * section. Since TSS's are completely CPU-local, we want them
- * on exact cacheline boundaries, to eliminate cacheline ping-pong.
- */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
-
-/* Copies of the original ist values from the tss are only accessed during
- * debugging, no special alignment required.
- */
-DEFINE_PER_CPU(struct orig_ist, orig_ist);
-
-#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 5f10c7189534..0c55e9d86f69 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -1198,7 +1198,7 @@ static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }
 static int __assign_irq_vector(int irq)
 {
     static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
-    int vector, offset, i;
+    int vector, offset;
 
     BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
 
@@ -1215,11 +1215,8 @@ next:
     }
     if (vector == current_vector)
         return -ENOSPC;
-    if (vector == SYSCALL_VECTOR)
+    if (test_and_set_bit(vector, used_vectors))
         goto next;
-    for (i = 0; i < NR_IRQ_VECTORS; i++)
-        if (irq_vector[i] == vector)
-            goto next;
 
     current_vector = vector;
     current_offset = offset;
@@ -2295,6 +2292,12 @@ static inline void __init check_timer(void)
 
 void __init setup_IO_APIC(void)
 {
+    int i;
+
+    /* Reserve all the system vectors. */
+    for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
+        set_bit(i, used_vectors);
+
     enable_IO_APIC();
 
     if (acpi_ioapic)
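Note on the used_vectors change above: replacing the linear scan over irq_vector[] with test_and_set_bit() makes vector allocation a single bitmap probe, and setup_IO_APIC() pre-reserves the system vector range so devices can never claim it. A standalone sketch of the idea follows; the NR_VECTORS and FIRST_SYSTEM_VECTOR values are assumptions, and the bit helper is a non-atomic stand-in, not the kernel's.

/* Sketch: reserve system vectors up front, claim free ones with one probe. */
#include <stdio.h>
#include <limits.h>

#define NR_VECTORS          256     /* assumed size */
#define FIRST_SYSTEM_VECTOR 239     /* assumed boundary */
#define BITS_PER_LONG       (sizeof(long) * CHAR_BIT)

static unsigned long used_vectors[NR_VECTORS / BITS_PER_LONG];

static int test_and_set_bit(int nr, unsigned long *addr)
{
    unsigned long mask = 1UL << (nr % BITS_PER_LONG);
    unsigned long old = addr[nr / BITS_PER_LONG];

    addr[nr / BITS_PER_LONG] |= mask;   /* not atomic: demo only */
    return (old & mask) != 0;
}

int main(void)
{
    int i;

    /* Reserve all the system vectors, as setup_IO_APIC() now does. */
    for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
        test_and_set_bit(i, used_vectors);

    /* A device vector is claimed with a single bitmap probe. */
    printf("vector 100 free before: %d\n", !test_and_set_bit(100, used_vectors));
    printf("vector 100 free after:  %d\n", !test_and_set_bit(100, used_vectors));
    return 0;
}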
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c
index 66e6b797b2cb..2cf20de5beca 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/mce_64.c
@@ -799,7 +799,8 @@ static __cpuinit int mce_create_device(unsigned int cpu)
 {
     int err;
     int i;
-    if (!mce_available(&cpu_data[cpu]))
+
+    if (!mce_available(&cpu_data(cpu)))
         return -EIO;
 
     memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c
index 0d2afd96aca4..752fb16a817d 100644
--- a/arch/x86/kernel/mce_amd_64.c
+++ b/arch/x86/kernel/mce_amd_64.c
@@ -472,11 +472,11 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
     sprintf(name, "threshold_bank%i", bank);
 
 #ifdef CONFIG_SMP
-    if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */
+    if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
         i = first_cpu(per_cpu(cpu_core_map, cpu));
 
         /* first core not up yet */
-        if (cpu_data[i].cpu_core_id)
+        if (cpu_data(i).cpu_core_id)
             goto out;
 
         /* already linked */
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 09cf78110358..09c315214a5e 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -132,7 +132,7 @@ static struct ucode_cpu_info {
 
 static void collect_cpu_info(int cpu_num)
 {
-    struct cpuinfo_x86 *c = cpu_data + cpu_num;
+    struct cpuinfo_x86 *c = &cpu_data(cpu_num);
     struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
     unsigned int val[2];
 
@@ -522,7 +522,7 @@ static struct platform_device *microcode_pdev;
 static int cpu_request_microcode(int cpu)
 {
     char name[30];
-    struct cpuinfo_x86 *c = cpu_data + cpu;
+    struct cpuinfo_x86 *c = &cpu_data(cpu);
     const struct firmware *firmware;
     void *buf;
     unsigned long size;
@@ -570,7 +570,7 @@ static int cpu_request_microcode(int cpu)
 
 static int apply_microcode_check_cpu(int cpu)
 {
-    struct cpuinfo_x86 *c = cpu_data + cpu;
+    struct cpuinfo_x86 *c = &cpu_data(cpu);
     struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
     cpumask_t old;
     unsigned int val[2];
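The cpu_data conversions in this and the surrounding files all follow one pattern: the old global array cpu_data[NR_CPUS] becomes a per-CPU variable reached through a cpu_data(cpu) accessor, so "cpu_data + n" / "cpu_data[n]" turns into "&cpu_data(n)" / "cpu_data(n)" at each call site. A standalone sketch of the accessor pattern; the names mirror the kernel's, but the array-backed stand-in below is not the real per_cpu machinery.

/* Sketch: a per-CPU array hidden behind a function-like accessor macro. */
#include <stdio.h>

#define NR_CPUS 4

struct cpuinfo_x86 {
    unsigned int cpu_index;
};

/* stand-in for DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info) */
static struct cpuinfo_x86 cpu_info[NR_CPUS];

/* stand-in for: #define cpu_data(cpu) per_cpu(cpu_info, cpu) */
#define cpu_data(cpu) (cpu_info[cpu])

int main(void)
{
    struct cpuinfo_x86 *c = &cpu_data(1);   /* new-style call site */

    c->cpu_index = 1;
    printf("cpu %u\n", cpu_data(1).cpu_index);
    return 0;
}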
diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c
index 8bf0ca03ac8e..ef4aab123581 100644
--- a/arch/x86/kernel/mpparse_64.c
+++ b/arch/x86/kernel/mpparse_64.c
@@ -57,6 +57,8 @@ unsigned long mp_lapic_addr = 0;
 
 /* Processor that is doing the boot up */
 unsigned int boot_cpu_id = -1U;
+EXPORT_SYMBOL(boot_cpu_id);
+
 /* Internal processor count */
 unsigned int num_processors __cpuinitdata = 0;
 
@@ -86,7 +88,7 @@ static int __init mpf_checksum(unsigned char *mp, int len)
     return sum & 0xFF;
 }
 
-static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
+static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
 {
     int cpu;
     cpumask_t tmp_map;
@@ -123,7 +125,18 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
         cpu = 0;
     }
     bios_cpu_apicid[cpu] = m->mpc_apicid;
-    x86_cpu_to_apicid[cpu] = m->mpc_apicid;
+    /*
+     * We get called early in the the start_kernel initialization
+     * process when the per_cpu data area is not yet setup, so we
+     * use a static array that is removed after the per_cpu data
+     * area is created.
+     */
+    if (x86_cpu_to_apicid_ptr) {
+        u8 *x86_cpu_to_apicid = (u8 *)x86_cpu_to_apicid_ptr;
+        x86_cpu_to_apicid[cpu] = m->mpc_apicid;
+    } else {
+        per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
+    }
 
     cpu_set(cpu, cpu_possible_map);
     cpu_set(cpu, cpu_present_map);
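MP_processor_info() can run before the per-CPU areas exist, so the hunk above writes through x86_cpu_to_apicid_ptr into a static __initdata array; once per-CPU data is up, the values are migrated and the pointer is cleared (see the smp_set_apicids() hunk in smpboot_64.c below). A compressed, standalone sketch of that handoff with simplified types; it is an illustration, not the kernel code.

/* Sketch: write through a pointer to a static array while per-CPU
 * storage is unavailable, then migrate and retire the array. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4
#define BAD_APICID 0xFFu

static unsigned char x86_cpu_to_apicid_init[NR_CPUS]; /* __initdata in the kernel */
static void *x86_cpu_to_apicid_ptr;
static unsigned char x86_cpu_to_apicid[NR_CPUS];      /* stands in for the per-CPU var */

static void record_apicid(int cpu, unsigned char apicid)
{
    if (x86_cpu_to_apicid_ptr) {        /* early: per-CPU area not set up */
        unsigned char *a = x86_cpu_to_apicid_ptr;
        a[cpu] = apicid;
    } else {                            /* late: per-CPU area available */
        x86_cpu_to_apicid[cpu] = apicid;
    }
}

int main(void)
{
    memset(x86_cpu_to_apicid_init, BAD_APICID, sizeof(x86_cpu_to_apicid_init));
    x86_cpu_to_apicid_ptr = x86_cpu_to_apicid_init;

    record_apicid(0, 2);                /* goes to the static array */

    /* later, the smp_set_apicids() step copies and retires the array */
    memcpy(x86_cpu_to_apicid, x86_cpu_to_apicid_init, sizeof(x86_cpu_to_apicid));
    x86_cpu_to_apicid_ptr = NULL;

    record_apicid(1, 4);                /* now goes to per-CPU storage */
    printf("%d %d\n", x86_cpu_to_apicid[0], x86_cpu_to_apicid[1]);
    return 0;
}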
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index e18e516cf549..ee6eba4ecfea 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -112,7 +112,7 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
 static int msr_open(struct inode *inode, struct file *file)
 {
     unsigned int cpu = iminor(file->f_path.dentry->d_inode);
-    struct cpuinfo_x86 *c = &(cpu_data)[cpu];
+    struct cpuinfo_x86 *c = &cpu_data(cpu);
 
     if (cpu >= NR_CPUS || !cpu_online(cpu))
         return -ENXIO;  /* No such CPU */
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
index b2b42bdb0a15..afaf9f12c032 100644
--- a/arch/x86/kernel/pci-dma_64.c
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -11,7 +11,7 @@
 #include <asm/iommu.h>
 #include <asm/calgary.h>
 
-int iommu_merge __read_mostly = 0;
+int iommu_merge __read_mostly = 1;
 EXPORT_SYMBOL(iommu_merge);
 
 dma_addr_t bad_dma_address __read_mostly;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 044a47745a5c..7b899584d290 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -295,34 +295,52 @@ static int __init idle_setup(char *str)
 }
 early_param("idle", idle_setup);
 
-void show_regs(struct pt_regs * regs)
+void __show_registers(struct pt_regs *regs, int all)
 {
     unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
     unsigned long d0, d1, d2, d3, d6, d7;
+    unsigned long esp;
+    unsigned short ss, gs;
+
+    if (user_mode_vm(regs)) {
+        esp = regs->esp;
+        ss = regs->xss & 0xffff;
+        savesegment(gs, gs);
+    } else {
+        esp = (unsigned long) (&regs->esp);
+        savesegment(ss, ss);
+        savesegment(gs, gs);
+    }
 
     printk("\n");
-    printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
-    printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
+    printk("Pid: %d, comm: %s %s (%s %.*s)\n",
+            task_pid_nr(current), current->comm,
+            print_tainted(), init_utsname()->release,
+            (int)strcspn(init_utsname()->version, " "),
+            init_utsname()->version);
+
+    printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
+            0xffff & regs->xcs, regs->eip, regs->eflags,
+            smp_processor_id());
     print_symbol("EIP is at %s\n", regs->eip);
 
-    if (user_mode_vm(regs))
-        printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
-    printk(" EFLAGS: %08lx %s (%s %.*s)\n",
-           regs->eflags, print_tainted(), init_utsname()->release,
-           (int)strcspn(init_utsname()->version, " "),
-           init_utsname()->version);
     printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-        regs->eax,regs->ebx,regs->ecx,regs->edx);
-    printk("ESI: %08lx EDI: %08lx EBP: %08lx",
-        regs->esi, regs->edi, regs->ebp);
-    printk(" DS: %04x ES: %04x FS: %04x\n",
-        0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs);
+        regs->eax, regs->ebx, regs->ecx, regs->edx);
+    printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
+        regs->esi, regs->edi, regs->ebp, esp);
+    printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
+           regs->xds & 0xffff, regs->xes & 0xffff,
+           regs->xfs & 0xffff, gs, ss);
+
+    if (!all)
+        return;
 
     cr0 = read_cr0();
     cr2 = read_cr2();
     cr3 = read_cr3();
     cr4 = read_cr4_safe();
-    printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
+    printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
+            cr0, cr2, cr3, cr4);
 
     get_debugreg(d0, 0);
     get_debugreg(d1, 1);
@@ -330,10 +348,16 @@ void show_regs(struct pt_regs * regs)
     get_debugreg(d3, 3);
     printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
             d0, d1, d2, d3);
+
     get_debugreg(d6, 6);
     get_debugreg(d7, 7);
-    printk("DR6: %08lx DR7: %08lx\n", d6, d7);
+    printk("DR6: %08lx DR7: %08lx\n",
+            d6, d7);
+}
 
+void show_regs(struct pt_regs *regs)
+{
+    __show_registers(regs, 1);
     show_trace(NULL, regs, &regs->esp);
 }
 
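The consolidation above funnels both register dumps through one worker keyed by an "all" flag: the oops path prints only the general registers, while show_regs() also dumps the control and debug registers. A minimal standalone sketch of the split, not kernel code:

/* Sketch: one worker with an "all" flag, two thin entry points. */
#include <stdio.h>

static void __show_registers(int all)
{
    printf("GP registers...\n");
    if (!all)
        return;             /* oops path stops here */
    printf("CR and DR registers...\n");
}

static void show_registers(void) { __show_registers(0); }   /* die()/oops */
static void show_regs(void)      { __show_registers(1); }   /* full dump */

int main(void)
{
    show_registers();
    show_regs();
    return 0;
}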
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index d769e204f942..a4ce1911efdf 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -45,9 +45,12 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
     if (!(config & 0x2))
         pci_write_config_byte(dev, 0xf4, config);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
+            quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
+            quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
+            quirk_intel_irqbalance);
 #endif
 
 #if defined(CONFIG_HPET_TIMER)
@@ -56,7 +59,8 @@ unsigned long force_hpet_address;
 static enum {
     NONE_FORCE_HPET_RESUME,
     OLD_ICH_FORCE_HPET_RESUME,
-    ICH_FORCE_HPET_RESUME
+    ICH_FORCE_HPET_RESUME,
+    VT8237_FORCE_HPET_RESUME
 } force_hpet_resume_type;
 
 static void __iomem *rcba_base;
@@ -146,17 +150,17 @@ static void ich_force_enable_hpet(struct pci_dev *dev)
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
             ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
             ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
             ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
             ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
             ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
             ich_force_enable_hpet);
 
 
 static struct pci_dev *cached_dev;
@@ -232,10 +236,91 @@ static void old_ich_force_enable_hpet(struct pci_dev *dev)
         printk(KERN_DEBUG "Failed to force enable HPET\n");
 }
 
+/*
+ * Undocumented chipset features. Make sure that the user enforced
+ * this.
+ */
+static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
+{
+    if (hpet_force_user)
+        old_ich_force_enable_hpet(dev);
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
+            old_ich_force_enable_hpet_user);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
+            old_ich_force_enable_hpet_user);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
+            old_ich_force_enable_hpet_user);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
+            old_ich_force_enable_hpet_user);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
             old_ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
             old_ich_force_enable_hpet);
+
+
+static void vt8237_force_hpet_resume(void)
+{
+    u32 val;
+
+    if (!force_hpet_address || !cached_dev)
+        return;
+
+    val = 0xfed00000 | 0x80;
+    pci_write_config_dword(cached_dev, 0x68, val);
+
+    pci_read_config_dword(cached_dev, 0x68, &val);
+    if (val & 0x80)
+        printk(KERN_DEBUG "Force enabled HPET at resume\n");
+    else
+        BUG();
+}
+
+static void vt8237_force_enable_hpet(struct pci_dev *dev)
+{
+    u32 uninitialized_var(val);
+
+    if (!hpet_force_user || hpet_address || force_hpet_address)
+        return;
+
+    pci_read_config_dword(dev, 0x68, &val);
+    /*
+     * Bit 7 is HPET enable bit.
+     * Bit 31:10 is HPET base address (contrary to what datasheet claims)
+     */
+    if (val & 0x80) {
+        force_hpet_address = (val & ~0x3ff);
+        printk(KERN_DEBUG "HPET at base address 0x%lx\n",
+                force_hpet_address);
+        return;
+    }
+
+    /*
+     * HPET is disabled. Trying enabling at FED00000 and check
+     * whether it sticks
+     */
+    val = 0xfed00000 | 0x80;
+    pci_write_config_dword(dev, 0x68, val);
+
+    pci_read_config_dword(dev, 0x68, &val);
+    if (val & 0x80) {
+        force_hpet_address = (val & ~0x3ff);
+        printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
+                force_hpet_address);
+        cached_dev = dev;
+        force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
+        return;
+    }
+
+    printk(KERN_DEBUG "Failed to force enable HPET\n");
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
+            vt8237_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
+            vt8237_force_enable_hpet);
+
 
 void force_hpet_resume(void)
 {
@@ -246,6 +331,9 @@ void force_hpet_resume(void)
     case OLD_ICH_FORCE_HPET_RESUME:
         return old_ich_force_hpet_resume();
 
+    case VT8237_FORCE_HPET_RESUME:
+        return vt8237_force_hpet_resume();
+
     default:
         break;
     }
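The VT8237 quirk follows the same write/read-back/verify shape as the ICH ones: read config dword 0x68, and if the HPET enable bit (bit 7) is clear, try forcing the block live at 0xfed00000 and only trust the result if the bit sticks. A standalone sketch of that probe pattern against a fake config register; the device model here is invented for illustration.

/* Sketch: write, read back, and only believe the bit if it sticks. */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_cfg_0x68;      /* stands in for PCI config dword 0x68 */

static void pci_write_config_dword(uint32_t val) { fake_cfg_0x68 = val; }
static uint32_t pci_read_config_dword(void)      { return fake_cfg_0x68; }

int main(void)
{
    uint32_t val = pci_read_config_dword();

    if (!(val & 0x80)) {            /* HPET disabled: try to force it */
        pci_write_config_dword(0xfed00000 | 0x80);
        val = pci_read_config_dword();
    }

    if (val & 0x80)                 /* bits 31:10 hold the base address */
        printf("HPET at base 0x%x\n", (unsigned)(val & ~0x3ffu));
    else
        printf("failed to force enable HPET\n");
    return 0;
}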
diff --git a/arch/x86/kernel/reboot_64.c b/arch/x86/kernel/reboot_64.c
index 368db2b9c5ac..776eb06b6512 100644
--- a/arch/x86/kernel/reboot_64.c
+++ b/arch/x86/kernel/reboot_64.c
@@ -11,6 +11,7 @@
 #include <linux/sched.h>
 #include <asm/io.h>
 #include <asm/delay.h>
+#include <asm/desc.h>
 #include <asm/hw_irq.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -136,7 +137,7 @@ void machine_emergency_restart(void)
     }
 
     case BOOT_TRIPLE:
-        __asm__ __volatile__("lidt (%0)": :"r" (&no_idt));
+        load_idt((const struct desc_ptr *)&no_idt);
         __asm__ __volatile__("int3");
 
         reboot_type = BOOT_KBD;
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
index 8b30b26ad069..1a07bbea7be3 100644
--- a/arch/x86/kernel/reboot_fixups_32.c
+++ b/arch/x86/kernel/reboot_fixups_32.c
@@ -12,6 +12,7 @@
 #include <linux/interrupt.h>
 #include <asm/reboot_fixups.h>
 #include <asm/msr.h>
+#include <asm/geode.h>
 
 static void cs5530a_warm_reset(struct pci_dev *dev)
 {
@@ -24,11 +25,8 @@ static void cs5530a_warm_reset(struct pci_dev *dev)
 
 static void cs5536_warm_reset(struct pci_dev *dev)
 {
-    /*
-     * 6.6.2.12 Soft Reset (DIVIL_SOFT_RESET)
-     * writing 1 to the LSB of this MSR causes a hard reset.
-     */
-    wrmsrl(0x51400017, 1ULL);
+    /* writing 1 to the LSB of this MSR causes a hard reset */
+    wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL);
     udelay(50); /* shouldn't get here but be safe and spin a while */
 }
 
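The magic 0x51400017 is replaced by a named constant that presumably comes from the newly included <asm/geode.h>. Comparing the two sides of the hunk, the definition is evidently equivalent to the following sketch (inferred from the diff, not quoted from the header):

#define MSR_DIVIL_SOFT_RESET	0x51400017	/* Geode DIVIL soft-reset MSR */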
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
index ba9188235057..3558ac78c926 100644
--- a/arch/x86/kernel/setup64.c
+++ b/arch/x86/kernel/setup64.c
@@ -185,6 +185,12 @@ void __cpuinit check_efer(void)
 unsigned long kernel_eflags;
 
 /*
+ * Copies of the original ist values from the tss are only accessed during
+ * debugging, no special alignment required.
+ */
+DEFINE_PER_CPU(struct orig_ist, orig_ist);
+
+/*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
  * and IDT. We reload them nevertheless, this function acts as a
@@ -224,8 +230,8 @@ void __cpuinit cpu_init (void)
     memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
 
     cpu_gdt_descr[cpu].size = GDT_SIZE;
-    asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
-    asm volatile("lidt %0" :: "m" (idt_descr));
+    load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
+    load_idt((const struct desc_ptr *)&idt_descr);
 
     memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
     syscall_init();
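load_gdt()/load_idt() let call sites stop open-coding lgdt/lidt inline assembly. Their likely shape, sketched here under the assumption that they are thin inline-asm wrappers over a packed limit/pointer descriptor (the real definitions live in the x86 descriptor headers):

/* Sketch of the descriptor-load helpers this series adopts. */
struct desc_ptr {
	unsigned short size;
	unsigned long address;
} __attribute__((packed));

static inline void load_gdt(const struct desc_ptr *dtr)
{
	asm volatile("lgdt %0" : : "m" (*dtr));
}

static inline void load_idt(const struct desc_ptr *dtr)
{
	asm volatile("lidt %0" : : "m" (*dtr));
}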
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 978dc0196a0f..e4f199124761 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -661,9 +661,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 #ifdef CONFIG_PCI
-#ifdef CONFIG_X86_IO_APIC
-    check_acpi_pci();   /* Checks more than just ACPI actually */
-#endif
+    early_quirks();
 #endif
 
 #ifdef CONFIG_ACPI
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index cdcba6975226..31322d42eaae 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -302,6 +302,11 @@ void __init setup_arch(char **cmdline_p)
 
     dmi_scan_machine();
 
+#ifdef CONFIG_SMP
+    /* setup to use the static apicid table during kernel startup */
+    x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
+#endif
+
 #ifdef CONFIG_ACPI
     /*
      * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
@@ -554,7 +559,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
        but in the same order as the HT nodeids.
        If that doesn't result in a usable node fall back to the
        path for the previous case. */
-        int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
+        int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
         if (ht_nodeid >= 0 &&
             apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
             node = apicid_to_node[ht_nodeid];
@@ -878,6 +883,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 
 #ifdef CONFIG_SMP
     c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
+    c->cpu_index = 0;
 #endif
 }
 
@@ -984,6 +990,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
     struct cpuinfo_x86 *c = v;
+    int cpu = 0;
 
     /*
      * These flag bits must match the definitions in <asm/cpufeature.h>.
@@ -1062,8 +1069,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 
 #ifdef CONFIG_SMP
-    if (!cpu_online(c-cpu_data))
+    if (!cpu_online(c->cpu_index))
         return 0;
+    cpu = c->cpu_index;
 #endif
 
     seq_printf(m,"processor\t: %u\n"
@@ -1071,7 +1079,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
         "cpu family\t: %d\n"
         "model\t\t: %d\n"
         "model name\t: %s\n",
-        (unsigned)(c-cpu_data),
+        (unsigned)cpu,
         c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
         c->x86,
         (int)c->x86_model,
@@ -1083,7 +1091,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
         seq_printf(m, "stepping\t: unknown\n");
 
     if (cpu_has(c,X86_FEATURE_TSC)) {
-        unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
+        unsigned int freq = cpufreq_quick_get((unsigned)cpu);
         if (!freq)
             freq = cpu_khz;
         seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
@@ -1096,7 +1104,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 #ifdef CONFIG_SMP
     if (smp_num_siblings * c->x86_max_cores > 1) {
-        int cpu = c - cpu_data;
         seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
         seq_printf(m, "siblings\t: %d\n",
                cpus_weight(per_cpu(cpu_core_map, cpu)));
@@ -1154,12 +1161,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-    return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+    if (*pos == 0)  /* just in case, cpu 0 is not the first */
+        *pos = first_cpu(cpu_possible_map);
+    if ((*pos) < NR_CPUS && cpu_possible(*pos))
+        return &cpu_data(*pos);
+    return NULL;
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-    ++*pos;
+    *pos = next_cpu(*pos, cpu_possible_map);
     return c_start(m, pos);
 }
 
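c_start()/c_next() now walk cpu_possible_map instead of assuming CPUs 0..NR_CPUS-1 are dense, since per-CPU cpu_info only exists for possible CPUs. A standalone sketch of that sparse-iterator pattern; the mask value is illustrative only.

/* Sketch: iterate only over set bits of a "possible" mask. */
#include <stdio.h>

#define NR_CPUS 8
static unsigned possible_mask = 0x0b;   /* cpus 0, 1, 3 are possible */

static int next_cpu(int after)
{
    int cpu;

    for (cpu = after + 1; cpu < NR_CPUS; cpu++)
        if (possible_mask & (1u << cpu))
            return cpu;
    return NR_CPUS;                     /* no more possible cpus */
}

int main(void)
{
    int cpu;

    for (cpu = next_cpu(-1); cpu < NR_CPUS; cpu = next_cpu(cpu))
        printf("show cpu %d\n", cpu);   /* only possible cpus visited */
    return 0;
}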
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index 791d9f8036ae..2621ca3b2e4d 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -610,7 +610,7 @@ static void stop_this_cpu (void * dummy)
  */
     cpu_clear(smp_processor_id(), cpu_online_map);
     disable_local_APIC();
-    if (cpu_data[smp_processor_id()].hlt_works_ok)
+    if (cpu_data(smp_processor_id()).hlt_works_ok)
         for(;;) halt();
     for (;;);
 }
@@ -676,7 +676,7 @@ static int convert_apicid_to_cpu(int apic_id)
     int i;
 
     for (i = 0; i < NR_CPUS; i++) {
-        if (x86_cpu_to_apicid[i] == apic_id)
+        if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
             return i;
     }
     return -1;
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index 5c2964727d19..03fa6ed559c6 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -322,17 +322,27 @@ void unlock_ipi_call_lock(void)
 }
 
 /*
- * this function sends a 'generic call function' IPI to one other CPU
- * in the system.
- *
- * cpu is a standard Linux logical CPU number.
+ * this function sends a 'generic call function' IPI to all other CPU
+ * of the system defined in the mask.
  */
-static void
-__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-                int nonatomic, int wait)
+
+static int
+__smp_call_function_mask(cpumask_t mask,
+             void (*func)(void *), void *info,
+             int wait)
 {
     struct call_data_struct data;
-    int cpus = 1;
+    cpumask_t allbutself;
+    int cpus;
+
+    allbutself = cpu_online_map;
+    cpu_clear(smp_processor_id(), allbutself);
+
+    cpus_and(mask, mask, allbutself);
+    cpus = cpus_weight(mask);
+
+    if (!cpus)
+        return 0;
 
     data.func = func;
     data.info = info;
@@ -343,19 +353,55 @@ __smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 
     call_data = &data;
     wmb();
-    /* Send a message to all other CPUs and wait for them to respond */
-    send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
+
+    /* Send a message to other CPUs */
+    if (cpus_equal(mask, allbutself))
+        send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+    else
+        send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 
     /* Wait for response */
     while (atomic_read(&data.started) != cpus)
         cpu_relax();
 
     if (!wait)
-        return;
+        return 0;
 
     while (atomic_read(&data.finished) != cpus)
         cpu_relax();
+
+    return 0;
+}
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on.  Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+               void (*func)(void *), void *info,
+               int wait)
+{
+    int ret;
+
+    /* Can deadlock when called with interrupts disabled */
+    WARN_ON(irqs_disabled());
+
+    spin_lock(&call_lock);
+    ret = __smp_call_function_mask(mask, func, info, wait);
+    spin_unlock(&call_lock);
+    return ret;
 }
+EXPORT_SYMBOL(smp_call_function_mask);
 
 /*
  * smp_call_function_single - Run a function on a specific CPU
@@ -374,6 +420,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
             int nonatomic, int wait)
 {
     /* prevent preemption and reschedule on another processor */
+    int ret;
     int me = get_cpu();
 
     /* Can deadlock when called with interrupts disabled */
@@ -387,51 +434,14 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
         return 0;
     }
 
-    spin_lock(&call_lock);
-    __smp_call_function_single(cpu, func, info, nonatomic, wait);
-    spin_unlock(&call_lock);
+    ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
     put_cpu();
-    return 0;
+    return ret;
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
 /*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-static void __smp_call_function (void (*func) (void *info), void *info,
-                    int nonatomic, int wait)
-{
-    struct call_data_struct data;
-    int cpus = num_online_cpus()-1;
-
-    if (!cpus)
-        return;
-
-    data.func = func;
-    data.info = info;
-    atomic_set(&data.started, 0);
-    data.wait = wait;
-    if (wait)
-        atomic_set(&data.finished, 0);
-
-    call_data = &data;
-    wmb();
-    /* Send a message to all other CPUs and wait for them to respond */
-    send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-    /* Wait for response */
-    while (atomic_read(&data.started) != cpus)
-        cpu_relax();
-
-    if (!wait)
-        return;
-
-    while (atomic_read(&data.finished) != cpus)
-        cpu_relax();
-}
-
-/*
  * smp_call_function - run a function on all other CPUs.
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
@@ -449,10 +459,7 @@ static void __smp_call_function (void (*func) (void *info), void *info,
 int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
             int wait)
 {
-    spin_lock(&call_lock);
-    __smp_call_function(func,info,nonatomic,wait);
-    spin_unlock(&call_lock);
-    return 0;
+    return smp_call_function_mask(cpu_online_map, func, info, wait);
 }
 EXPORT_SYMBOL(smp_call_function);
 
@@ -479,7 +486,7 @@ void smp_send_stop(void)
     /* Don't deadlock on the call lock in panic */
     nolock = !spin_trylock(&call_lock);
     local_irq_save(flags);
-    __smp_call_function(stop_this_cpu, NULL, 0, 0);
+    __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
     if (!nolock)
         spin_unlock(&call_lock);
     disable_local_APIC();
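A hypothetical call site for the new smp_call_function_mask(), obeying the kernel-doc rules above: not from interrupt context, not with interrupts disabled, and with the current CPU excluded from the mask. The flush target and the mask choice here are invented for illustration; only the primitive itself comes from this patch.

/* Sketch of a caller; kernel context assumed, names other than
 * smp_call_function_mask()/cpu_core_map are hypothetical. */
#include <linux/smp.h>
#include <linux/cpumask.h>

static void flush_local_state(void *info)
{
	/* runs on each selected CPU; must be fast and non-blocking */
}

static int flush_package_siblings(int cpu)
{
	cpumask_t mask = per_cpu(cpu_core_map, cpu);

	cpu_clear(smp_processor_id(), mask);	/* mask must exclude self */
	return smp_call_function_mask(mask, flush_local_state, NULL, 1);
}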
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index be3faac04719..7b8fdfa169dd 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -67,7 +67,7 @@ int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
 
 /* Last level cache ID of each logical CPU */
-int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
+DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
 
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
@@ -89,12 +89,20 @@ EXPORT_SYMBOL(cpu_possible_map);
 static cpumask_t smp_commenced_mask;
 
 /* Per CPU bogomips and other parameters */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
-            { [0 ... NR_CPUS-1] = 0xff };
-EXPORT_SYMBOL(x86_cpu_to_apicid);
+/*
+ * The following static array is used during kernel startup
+ * and the x86_cpu_to_apicid_ptr contains the address of the
+ * array during this time.  Is it zeroed when the per_cpu
+ * data area is removed.
+ */
+u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
+            { [0 ... NR_CPUS-1] = BAD_APICID };
+void *x86_cpu_to_apicid_ptr;
+DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 
 u8 apicid_2_node[MAX_APICID];
 
@@ -150,9 +158,10 @@ void __init smp_alloc_memory(void)
 
 void __cpuinit smp_store_cpu_info(int id)
 {
-    struct cpuinfo_x86 *c = cpu_data + id;
+    struct cpuinfo_x86 *c = &cpu_data(id);
 
     *c = boot_cpu_data;
+    c->cpu_index = id;
     if (id!=0)
         identify_secondary_cpu(c);
     /*
@@ -294,7 +303,7 @@ static int cpucount;
 /* maps the cpu to the sched domain representing multi-core */
 cpumask_t cpu_coregroup_map(int cpu)
 {
-    struct cpuinfo_x86 *c = cpu_data + cpu;
+    struct cpuinfo_x86 *c = &cpu_data(cpu);
     /*
      * For perf, we return last level cache shared map.
      * And for power savings, we return cpu_core_map
@@ -311,41 +320,41 @@ static cpumask_t cpu_sibling_setup_map;
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
     int i;
-    struct cpuinfo_x86 *c = cpu_data;
+    struct cpuinfo_x86 *c = &cpu_data(cpu);
 
     cpu_set(cpu, cpu_sibling_setup_map);
 
     if (smp_num_siblings > 1) {
         for_each_cpu_mask(i, cpu_sibling_setup_map) {
-            if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
-                c[cpu].cpu_core_id == c[i].cpu_core_id) {
+            if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+                c->cpu_core_id == cpu_data(i).cpu_core_id) {
                 cpu_set(i, per_cpu(cpu_sibling_map, cpu));
                 cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                 cpu_set(i, per_cpu(cpu_core_map, cpu));
                 cpu_set(cpu, per_cpu(cpu_core_map, i));
-                cpu_set(i, c[cpu].llc_shared_map);
-                cpu_set(cpu, c[i].llc_shared_map);
+                cpu_set(i, c->llc_shared_map);
+                cpu_set(cpu, cpu_data(i).llc_shared_map);
             }
         }
     } else {
         cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
     }
 
-    cpu_set(cpu, c[cpu].llc_shared_map);
+    cpu_set(cpu, c->llc_shared_map);
 
     if (current_cpu_data.x86_max_cores == 1) {
         per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-        c[cpu].booted_cores = 1;
+        c->booted_cores = 1;
         return;
     }
 
     for_each_cpu_mask(i, cpu_sibling_setup_map) {
-        if (cpu_llc_id[cpu] != BAD_APICID &&
-            cpu_llc_id[cpu] == cpu_llc_id[i]) {
-            cpu_set(i, c[cpu].llc_shared_map);
-            cpu_set(cpu, c[i].llc_shared_map);
+        if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
+            per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
+            cpu_set(i, c->llc_shared_map);
+            cpu_set(cpu, cpu_data(i).llc_shared_map);
         }
-        if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
+        if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
             cpu_set(i, per_cpu(cpu_core_map, cpu));
             cpu_set(cpu, per_cpu(cpu_core_map, i));
             /*
@@ -357,15 +366,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
              * the booted_cores for this new cpu
              */
             if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-                c[cpu].booted_cores++;
+                c->booted_cores++;
             /*
              * increment the core count for all
              * the other cpus in this package
              */
             if (i != cpu)
-                c[i].booted_cores++;
-        } else if (i != cpu && !c[cpu].booted_cores)
-            c[cpu].booted_cores = c[i].booted_cores;
+                cpu_data(i).booted_cores++;
+        } else if (i != cpu && !c->booted_cores)
+            c->booted_cores = cpu_data(i).booted_cores;
         }
     }
 }
@@ -804,7 +813,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
     irq_ctx_init(cpu);
 
-    x86_cpu_to_apicid[cpu] = apicid;
+    per_cpu(x86_cpu_to_apicid, cpu) = apicid;
     /*
      * This grunge runs the startup process for
      * the targeted processor.
@@ -844,7 +853,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
         /* number CPUs logically, starting from 1 (BSP is 0) */
         Dprintk("OK.\n");
         printk("CPU%d: ", cpu);
-        print_cpu_info(&cpu_data[cpu]);
+        print_cpu_info(&cpu_data(cpu));
         Dprintk("CPU has booted.\n");
     } else {
         boot_error= 1;
@@ -866,7 +875,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
         cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
         cpucount--;
     } else {
-        x86_cpu_to_apicid[cpu] = apicid;
+        per_cpu(x86_cpu_to_apicid, cpu) = apicid;
         cpu_set(cpu, cpu_present_map);
     }
 
@@ -915,7 +924,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
     struct warm_boot_cpu_info info;
     int apicid, ret;
 
-    apicid = x86_cpu_to_apicid[cpu];
+    apicid = per_cpu(x86_cpu_to_apicid, cpu);
     if (apicid == BAD_APICID) {
         ret = -ENODEV;
         goto exit;
@@ -961,11 +970,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
      */
     smp_store_cpu_info(0); /* Final full version of the data */
     printk("CPU%d: ", 0);
-    print_cpu_info(&cpu_data[0]);
+    print_cpu_info(&cpu_data(0));
 
     boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
     boot_cpu_logical_apicid = logical_smp_processor_id();
-    x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
+    per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;
 
     current_thread_info()->cpu = 0;
 
@@ -1008,6 +1017,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
         printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
         smpboot_clear_io_apic_irqs();
         phys_cpu_present_map = physid_mask_of_physid(0);
+        map_cpu_to_logical_apicid();
         cpu_set(0, per_cpu(cpu_sibling_map, 0));
         cpu_set(0, per_cpu(cpu_core_map, 0));
         return;
@@ -1029,6 +1039,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
     }
     smpboot_clear_io_apic_irqs();
     phys_cpu_present_map = physid_mask_of_physid(0);
+    map_cpu_to_logical_apicid();
     cpu_set(0, per_cpu(cpu_sibling_map, 0));
     cpu_set(0, per_cpu(cpu_core_map, 0));
     return;
@@ -1082,7 +1093,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
     Dprintk("Before bogomips.\n");
     for (cpu = 0; cpu < NR_CPUS; cpu++)
         if (cpu_isset(cpu, cpu_callout_map))
-            bogosum += cpu_data[cpu].loops_per_jiffy;
+            bogosum += cpu_data(cpu).loops_per_jiffy;
     printk(KERN_INFO
         "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
         cpucount+1,
@@ -1152,7 +1163,7 @@ void __init native_smp_prepare_boot_cpu(void)
 void remove_siblinginfo(int cpu)
 {
     int sibling;
-    struct cpuinfo_x86 *c = cpu_data;
+    struct cpuinfo_x86 *c = &cpu_data(cpu);
 
     for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
         cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
@@ -1160,15 +1171,15 @@ void remove_siblinginfo(int cpu)
          * last thread sibling in this cpu core going down
          */
         if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-            c[sibling].booted_cores--;
+            cpu_data(sibling).booted_cores--;
     }
 
     for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
         cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
     cpus_clear(per_cpu(cpu_sibling_map, cpu));
     cpus_clear(per_cpu(cpu_core_map, cpu));
-    c[cpu].phys_proc_id = 0;
-    c[cpu].cpu_core_id = 0;
+    c->phys_proc_id = 0;
+    c->cpu_core_id = 0;
     cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index e351ac4ab5b1..fd1fff6a35a2 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -65,7 +65,7 @@ int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
 
 /* Last level cache ID of each logical CPU */
-u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
+DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
 
 /* Bitmask of currently online CPUs */
 cpumask_t cpu_online_map __read_mostly;
@@ -84,8 +84,8 @@ cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_possible_map);
 
 /* Per CPU bogomips and other parameters */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 /* Set when the idlers are all forked */
 int smp_threads_ready;
@@ -138,9 +138,10 @@ static unsigned long __cpuinit setup_trampoline(void)
 
 static void __cpuinit smp_store_cpu_info(int id)
 {
-    struct cpuinfo_x86 *c = cpu_data + id;
+    struct cpuinfo_x86 *c = &cpu_data(id);
 
     *c = boot_cpu_data;
+    c->cpu_index = id;
     identify_cpu(c);
     print_cpu_info(c);
 }
@@ -237,7 +238,7 @@ void __cpuinit smp_callin(void)
 /* maps the cpu to the sched domain representing multi-core */
 cpumask_t cpu_coregroup_map(int cpu)
 {
-    struct cpuinfo_x86 *c = cpu_data + cpu;
+    struct cpuinfo_x86 *c = &cpu_data(cpu);
     /*
      * For perf, we return last level cache shared map.
      * And for power savings, we return cpu_core_map
@@ -254,41 +255,41 @@ static cpumask_t cpu_sibling_setup_map;
 static inline void set_cpu_sibling_map(int cpu)
 {
     int i;
-    struct cpuinfo_x86 *c = cpu_data;
+    struct cpuinfo_x86 *c = &cpu_data(cpu);
 
     cpu_set(cpu, cpu_sibling_setup_map);
 
     if (smp_num_siblings > 1) {
         for_each_cpu_mask(i, cpu_sibling_setup_map) {
-            if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
-                c[cpu].cpu_core_id == c[i].cpu_core_id) {
+            if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+                c->cpu_core_id == cpu_data(i).cpu_core_id) {
                 cpu_set(i, per_cpu(cpu_sibling_map, cpu));
                 cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                 cpu_set(i, per_cpu(cpu_core_map, cpu));
                 cpu_set(cpu, per_cpu(cpu_core_map, i));
-                cpu_set(i, c[cpu].llc_shared_map);
-                cpu_set(cpu, c[i].llc_shared_map);
+                cpu_set(i, c->llc_shared_map);
+                cpu_set(cpu, cpu_data(i).llc_shared_map);
             }
         }
     } else {
         cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
     }
 
-    cpu_set(cpu, c[cpu].llc_shared_map);
+    cpu_set(cpu, c->llc_shared_map);
 
     if (current_cpu_data.x86_max_cores == 1) {
         per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-        c[cpu].booted_cores = 1;
+        c->booted_cores = 1;
         return;
     }
 
     for_each_cpu_mask(i, cpu_sibling_setup_map) {
-        if (cpu_llc_id[cpu] != BAD_APICID &&
-            cpu_llc_id[cpu] == cpu_llc_id[i]) {
-            cpu_set(i, c[cpu].llc_shared_map);
-            cpu_set(cpu, c[i].llc_shared_map);
+        if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
+            per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
+            cpu_set(i, c->llc_shared_map);
+            cpu_set(cpu, cpu_data(i).llc_shared_map);
         }
-        if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
+        if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
             cpu_set(i, per_cpu(cpu_core_map, cpu));
             cpu_set(cpu, per_cpu(cpu_core_map, i));
             /*
@@ -300,15 +301,15 @@ static inline void set_cpu_sibling_map(int cpu)
              * the booted_cores for this new cpu
              */
             if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-                c[cpu].booted_cores++;
+                c->booted_cores++;
             /*
              * increment the core count for all
              * the other cpus in this package
              */
             if (i != cpu)
-                c[i].booted_cores++;
-        } else if (i != cpu && !c[cpu].booted_cores)
-            c[cpu].booted_cores = c[i].booted_cores;
+                cpu_data(i).booted_cores++;
+        } else if (i != cpu && !c->booted_cores)
+            c->booted_cores = cpu_data(i).booted_cores;
         }
     }
 }
@@ -694,7 +695,7 @@ do_rest:
         clear_node_cpumask(cpu); /* was set by numa_add_cpu */
         cpu_clear(cpu, cpu_present_map);
         cpu_clear(cpu, cpu_possible_map);
-        x86_cpu_to_apicid[cpu] = BAD_APICID;
+        per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
         return -EIO;
     }
 
@@ -841,6 +842,26 @@ static int __init smp_sanity_check(unsigned max_cpus)
 }
 
 /*
+ * Copy apicid's found by MP_processor_info from initial array to the per cpu
+ * data area.  The x86_cpu_to_apicid_init array is then expendable and the
+ * x86_cpu_to_apicid_ptr is zeroed indicating that the static array is no
+ * longer available.
+ */
+void __init smp_set_apicids(void)
+{
+    int cpu;
+
+    for_each_cpu_mask(cpu, cpu_possible_map) {
+        if (per_cpu_offset(cpu))
+            per_cpu(x86_cpu_to_apicid, cpu) =
+                x86_cpu_to_apicid_init[cpu];
+    }
+
+    /* indicate the static array will be going away soon */
+    x86_cpu_to_apicid_ptr = NULL;
+}
+
+/*
  * Prepare for SMP bootup. The MP table or ACPI has been read
  * earlier. Just do some sanity checking here and enable APIC mode.
  */
@@ -849,6 +870,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
     nmi_watchdog_default();
     current_cpu_data = boot_cpu_data;
     current_thread_info()->cpu = 0;  /* needed? */
+    smp_set_apicids();
     set_cpu_sibling_map(0);
 
     if (smp_sanity_check(max_cpus) < 0) {
@@ -968,7 +990,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 static void remove_siblinginfo(int cpu)
 {
     int sibling;
-    struct cpuinfo_x86 *c = cpu_data;
+    struct cpuinfo_x86 *c = &cpu_data(cpu);
 
     for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
         cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
@@ -976,15 +998,15 @@ static void remove_siblinginfo(int cpu)
          * last thread sibling in this cpu core going down
          */
         if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-            c[sibling].booted_cores--;
+            cpu_data(sibling).booted_cores--;
     }
 
     for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
         cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
     cpus_clear(per_cpu(cpu_sibling_map, cpu));
     cpus_clear(per_cpu(cpu_core_map, cpu));
-    c[cpu].phys_proc_id = 0;
-    c[cpu].cpu_core_id = 0;
+    c->phys_proc_id = 0;
+    c->cpu_core_id = 0;
     cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
index f8fafe527ff1..622bb0268284 100644
--- a/arch/x86/kernel/suspend_64.c
+++ b/arch/x86/kernel/suspend_64.c
@@ -32,9 +32,9 @@ void __save_processor_state(struct saved_context *ctxt)
     /*
      * descriptor tables
      */
-    asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
-    asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
-    asm volatile ("str %0" : "=m" (ctxt->tr));
+    store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
+    store_idt((struct desc_ptr *)&ctxt->idt_limit);
+    store_tr(ctxt->tr);
 
     /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
     /*
@@ -91,8 +91,9 @@ void __restore_processor_state(struct saved_context *ctxt)
      * now restore the descriptor tables to their proper values
      * ltr is done i fix_processor_context().
      */
-    asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
-    asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
+    load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
+    load_idt((const struct desc_ptr *)&ctxt->idt_limit);
+
 
     /*
      * segment registers
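The suspend path becomes symmetric store_*()/load_*() pairs around the sleep. Assuming the same thin-wrapper style as the load helpers sketched after the setup64.c hunk above (and reusing that sketch's struct desc_ptr), the store side would look roughly like:

/* Sketch of the save-side helpers, mirroring the load_* sketch. */
static inline void store_gdt(struct desc_ptr *dtr)
{
	asm volatile("sgdt %0" : "=m" (*dtr));
}

static inline void store_idt(struct desc_ptr *dtr)
{
	asm volatile("sidt %0" : "=m" (*dtr));
}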
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 1e9d57256eb1..cc9acace7e23 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -63,6 +63,9 @@
 
 int panic_on_unrecovered_nmi;
 
+DECLARE_BITMAP(used_vectors, NR_VECTORS);
+EXPORT_SYMBOL_GPL(used_vectors);
+
 asmlinkage int system_call(void);
 
 /* Do we ignore FPU interrupts ? */
@@ -288,33 +291,9 @@ EXPORT_SYMBOL(dump_stack);
 void show_registers(struct pt_regs *regs)
 {
 	int i;
-	int in_kernel = 1;
-	unsigned long esp;
-	unsigned short ss, gs;
-
-	esp = (unsigned long) (&regs->esp);
-	savesegment(ss, ss);
-	savesegment(gs, gs);
-	if (user_mode_vm(regs)) {
-		in_kernel = 0;
-		esp = regs->esp;
-		ss = regs->xss & 0xffff;
-	}
+
 	print_modules();
-	printk(KERN_EMERG "CPU: %d\n"
-		KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n"
-		KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n",
-		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
-		print_tainted(), regs->eflags, init_utsname()->release,
-		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version);
-	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
-	printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
-		regs->eax, regs->ebx, regs->ecx, regs->edx);
-	printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
-		regs->esi, regs->edi, regs->ebp, esp);
-	printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
-		regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
+	__show_registers(regs, 0);
 	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
 		TASK_COMM_LEN, current->comm, task_pid_nr(current),
 		current_thread_info(), current, task_thread_info(current));
@@ -322,14 +301,14 @@ void show_registers(struct pt_regs *regs)
 	 * When in-kernel, we also print out the stack and code at the
 	 * time of the fault..
 	 */
-	if (in_kernel) {
+	if (!user_mode_vm(regs)) {
 		u8 *eip;
 		unsigned int code_prologue = code_bytes * 43 / 64;
 		unsigned int code_len = code_bytes;
 		unsigned char c;
 
 		printk("\n" KERN_EMERG "Stack: ");
-		show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
+		show_stack_log_lvl(NULL, regs, &regs->esp, KERN_EMERG);
 
 		printk(KERN_EMERG "Code: ");
 
@@ -374,11 +353,11 @@ int is_valid_bugaddr(unsigned long eip)
 void die(const char * str, struct pt_regs * regs, long err)
 {
 	static struct {
-		spinlock_t lock;
+		raw_spinlock_t lock;
 		u32 lock_owner;
 		int lock_owner_depth;
 	} die = {
-		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
+		.lock = __RAW_SPIN_LOCK_UNLOCKED,
 		.lock_owner = -1,
 		.lock_owner_depth = 0
 	};
@@ -389,13 +368,14 @@ void die(const char * str, struct pt_regs * regs, long err)
 
 	if (die.lock_owner != raw_smp_processor_id()) {
 		console_verbose();
-		spin_lock_irqsave(&die.lock, flags);
+		__raw_spin_lock(&die.lock);
+		raw_local_save_flags(flags);
 		die.lock_owner = smp_processor_id();
 		die.lock_owner_depth = 0;
 		bust_spinlocks(1);
 	}
 	else
-		local_save_flags(flags);
+		raw_local_save_flags(flags);
 
 	if (++die.lock_owner_depth < 3) {
 		unsigned long esp;
@@ -439,7 +419,8 @@ void die(const char * str, struct pt_regs * regs, long err)
 	bust_spinlocks(0);
 	die.lock_owner = -1;
 	add_taint(TAINT_DIE);
-	spin_unlock_irqrestore(&die.lock, flags);
+	__raw_spin_unlock(&die.lock);
+	raw_local_irq_restore(flags);
 
 	if (!regs)
 		return;
@@ -1142,6 +1123,8 @@ static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
 
 void __init trap_init(void)
 {
+	int i;
+
 #ifdef CONFIG_EISA
 	void __iomem *p = ioremap(0x0FFFD9, 4);
 	if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
@@ -1201,6 +1184,11 @@ void __init trap_init(void)
 
 	set_system_gate(SYSCALL_VECTOR,&system_call);
 
+	/* Reserve all the builtin and the syscall vector. */
+	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
+		set_bit(i, used_vectors);
+	set_bit(SYSCALL_VECTOR, used_vectors);
+
 	/*
 	 * Should be a barrier for any external CPU state.
 	 */
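The new used_vectors bitmap gives trap_init() and any later vector allocator one shared record of which IDT entries are taken. A userspace model of the reservation done above follows, with local set_bit_()/test_bit_() helpers standing in for the kernel's bitmap primitives.

#include <stdio.h>

#define NR_VECTORS		256
#define FIRST_EXTERNAL_VECTOR	0x20
#define SYSCALL_VECTOR		0x80
#define BITS_PER_LONG		(8 * sizeof(unsigned long))

static unsigned long used_vectors[NR_VECTORS / BITS_PER_LONG];

/* Minimal stand-ins for the kernel's set_bit()/test_bit(). */
static void set_bit_(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit_(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	int i;

	/* Mirror trap_init(): CPU exception vectors plus the
	 * syscall gate are marked busy up front. */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit_(i, used_vectors);
	set_bit_(SYSCALL_VECTOR, used_vectors);

	printf("vector 0x20 free? %s\n",
	       test_bit_(0x20, used_vectors) ? "no" : "yes");
	printf("vector 0x80 free? %s\n",
	       test_bit_(SYSCALL_VECTOR, used_vectors) ? "no" : "yes");
	return 0;
}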
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index b4a9b3db1994..df690c3fa458 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -462,7 +462,7 @@ void out_of_line_bug(void)
 EXPORT_SYMBOL(out_of_line_bug);
 #endif
 
-static DEFINE_SPINLOCK(die_lock);
+static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
@@ -474,13 +474,13 @@ unsigned __kprobes long oops_begin(void)
 	oops_enter();
 
 	/* racy, but better than risking deadlock. */
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	cpu = smp_processor_id();
-	if (!spin_trylock(&die_lock)) {
+	if (!__raw_spin_trylock(&die_lock)) {
 		if (cpu == die_owner)
 			/* nested oops. should stop eventually */;
 		else
-			spin_lock(&die_lock);
+			__raw_spin_lock(&die_lock);
 	}
 	die_nest_count++;
 	die_owner = cpu;
@@ -494,12 +494,10 @@ void __kprobes oops_end(unsigned long flags)
 	die_owner = -1;
 	bust_spinlocks(0);
 	die_nest_count--;
-	if (die_nest_count)
-		/* We still own the lock */
-		local_irq_restore(flags);
-	else
+	if (!die_nest_count)
 		/* Nest count reaches zero, release the lock. */
-		spin_unlock_irqrestore(&die_lock, flags);
+		__raw_spin_unlock(&die_lock);
+	raw_local_irq_restore(flags);
 	if (panic_on_oops)
 		panic("Fatal exception");
 	oops_exit();
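As in the 32-bit die(), the point of a raw lock plus an explicit nest count is that the oops path must not re-enter lockdep or preemption machinery that may itself be broken mid-crash, and a CPU that oopses while already printing an oops should keep going rather than deadlock on its own lock. A compact model of that re-entrancy scheme follows; a plain int stands in for the raw spinlock, smp_processor_id() becomes a parameter, and the flags handling is dropped.

#include <stdio.h>

static int die_lock;		/* 0 = free; stands in for the raw spinlock */
static int die_owner = -1;
static unsigned int die_nest_count;

static void oops_begin(int cpu)
{
	if (die_lock && cpu == die_owner) {
		/* Nested oops on the same CPU: don't self-deadlock,
		 * just deepen the nesting. */
	} else {
		die_lock = 1;	/* __raw_spin_lock() in the kernel */
	}
	die_nest_count++;
	die_owner = cpu;
}

static void oops_end(void)
{
	die_owner = -1;
	if (!--die_nest_count)
		die_lock = 0;	/* release only at outermost exit */
}

int main(void)
{
	oops_begin(0);
	oops_begin(0);		/* oops while handling an oops */
	oops_end();
	printf("still locked after inner oops_end: %d\n", die_lock);
	oops_end();
	printf("released at outer oops_end: %d\n", !die_lock);
	return 0;
}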
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index e87a3939ed40..b8a7cf671432 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -181,8 +181,8 @@ int recalibrate_cpu_khz(void)
 	if (cpu_has_tsc) {
 		cpu_khz = calculate_cpu_khz();
 		tsc_khz = cpu_khz;
-		cpu_data[0].loops_per_jiffy =
-			cpufreq_scale(cpu_data[0].loops_per_jiffy,
+		cpu_data(0).loops_per_jiffy =
+			cpufreq_scale(cpu_data(0).loops_per_jiffy,
 					cpu_khz_old, cpu_khz);
 		return 0;
 	} else
@@ -215,7 +215,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
 			return 0;
 		}
 		ref_freq = freq->old;
-		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+		loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
 		cpu_khz_ref = cpu_khz;
 	}
 
@@ -223,7 +223,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
 	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
 	    (val == CPUFREQ_RESUMECHANGE)) {
 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-			cpu_data[freq->cpu].loops_per_jiffy =
+			cpu_data(freq->cpu).loops_per_jiffy =
 				cpufreq_scale(loops_per_jiffy_ref,
 						ref_freq, freq->new);
 
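udelay() stays calibrated across frequency transitions because loops_per_jiffy is rescaled linearly by cpufreq_scale(old, ref, new), i.e. old * new / ref. A worked model of that arithmetic follows, widened to 64 bits as the kernel does so lpj times kHz cannot overflow; the numbers are illustrative only.

#include <stdio.h>

/* Model of cpufreq_scale(): scale old linearly by new/ref. */
static unsigned long cpufreq_scale(unsigned long old,
				   unsigned int ref_freq,
				   unsigned int new_freq)
{
	return (unsigned long)((unsigned long long)old * new_freq / ref_freq);
}

int main(void)
{
	unsigned long lpj_ref = 4004000;	/* calibrated at ref_khz */
	unsigned int ref_khz = 2000000, new_khz = 1000000;

	/* Halving the clock halves the loops needed per jiffy. */
	printf("lpj at %u kHz: %lu\n", new_khz,
	       cpufreq_scale(lpj_ref, ref_khz, new_khz));
	return 0;
}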
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 9f22e542c374..9c70af45b42b 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -73,13 +73,13 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	struct cpufreq_freqs *freq = data;
 	unsigned long *lpj, dummy;
 
-	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
+	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
 		return 0;
 
 	lpj = &dummy;
 	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
 #ifdef CONFIG_SMP
-		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
+		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
 #else
 		lpj = &boot_cpu_data.loops_per_jiffy;
 #endif
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 585541ca1a7e..78f2250963ae 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -48,7 +48,7 @@
 	({unsigned long v; \
 	extern char __vsyscall_0; \
 	asm("" : "=r" (v) : "0" (x)); \
-	((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); })
+	((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); })
 
 /*
  * vsyscall_gtod_data contains data that is :
@@ -291,7 +291,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
 #ifdef CONFIG_NUMA
 	node = cpu_to_node(cpu);
 #endif
-	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
+	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
 		write_rdtscp_aux((node << 12) | cpu);
 
 	/* Store cpu number in limit so that it can be loaded quickly
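write_rdtscp_aux((node << 12) | cpu) publishes the CPU and node numbers in the TSC_AUX MSR, so user space can fetch both plus the TSC with a single rdtscp. A sketch of the decode side follows; it assumes a CPU with RDTSCP support and a kernel that programs TSC_AUX with this 12-bit split, as the hunk above does.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t lo, hi, aux;

	/* rdtscp returns the TSC in edx:eax and TSC_AUX in ecx. */
	asm volatile("rdtscp" : "=a" (lo), "=d" (hi), "=c" (aux));

	/* Inverse of the kernel's (node << 12) | cpu encoding. */
	printf("tsc=%llu cpu=%u node=%u\n",
	       ((unsigned long long)hi << 32) | lo,
	       aux & 0xfff, aux >> 12);
	return 0;
}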
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
index f6edb11364df..952e7a89c2ac 100644
--- a/arch/x86/lib/delay_32.c
+++ b/arch/x86/lib/delay_32.c
@@ -82,7 +82,7 @@ inline void __const_udelay(unsigned long xloops)
 	__asm__("mull %0"
 		:"=d" (xloops), "=&a" (d0)
 		:"1" (xloops), "0"
-		(cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
+		(cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
 
 	__delay(++xloops);
 }
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
index 2dbebd308347..0ebbfb9e7c7f 100644
--- a/arch/x86/lib/delay_64.c
+++ b/arch/x86/lib/delay_64.c
@@ -40,7 +40,8 @@ EXPORT_SYMBOL(__delay);
 
 inline void __const_udelay(unsigned long xloops)
 {
-	__delay(((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) + 1);
+	__delay(((xloops * HZ *
+		cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
 }
 EXPORT_SYMBOL(__const_udelay);
 
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index e4928aa6bdfb..f93a730b44d0 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -36,8 +36,8 @@ static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR
 
 /* per CPU data structure (for /proc/cpuinfo et al), visible externally
  * indexed physically */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU(cpuinfo_x86, cpu_info) __cacheline_aligned;
+EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 /* physical ID of the CPU used to boot the system */
 unsigned char boot_cpu_id;
@@ -430,7 +430,7 @@ find_smp_config(void)
 void __init
 smp_store_cpu_info(int id)
 {
-	struct cpuinfo_x86 *c=&cpu_data[id];
+	struct cpuinfo_x86 *c = &cpu_data(id);
 
 	*c = boot_cpu_data;
 
@@ -634,7 +634,7 @@ do_boot_cpu(__u8 cpu)
 		cpu, smp_processor_id()));
 
 	printk("CPU%d: ", cpu);
-	print_cpu_info(&cpu_data[cpu]);
+	print_cpu_info(&cpu_data(cpu));
 	wmb();
 	cpu_set(cpu, cpu_callout_map);
 	cpu_set(cpu, cpu_present_map);
@@ -683,7 +683,7 @@ smp_boot_cpus(void)
 	 */
 	smp_store_cpu_info(boot_cpu_id);
 	printk("CPU%d: ", boot_cpu_id);
-	print_cpu_info(&cpu_data[boot_cpu_id]);
+	print_cpu_info(&cpu_data(boot_cpu_id));
 
 	if(is_cpu_quad()) {
 		/* booting on a Quad CPU */
@@ -714,7 +714,7 @@ smp_boot_cpus(void)
 		unsigned long bogosum = 0;
 		for (i = 0; i < NR_CPUS; i++)
 			if (cpu_isset(i, cpu_online_map))
-				bogosum += cpu_data[i].loops_per_jiffy;
+				bogosum += cpu_data(i).loops_per_jiffy;
 		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 			cpucount+1,
 			bogosum/(500000/HZ),
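The bogosum report converts summed loops_per_jiffy into BogoMIPS: one BogoMIPS is 500000 delay-loop iterations per second, hence the bogosum/(500000/HZ) divide, with the hundredths taken from bogosum/(5000/HZ) modulo 100. The same arithmetic standalone, with HZ fixed at 250 and the per-CPU values invented purely for illustration:

#include <stdio.h>

#define HZ 250	/* illustrative; real kernels configure this */

int main(void)
{
	/* Two CPUs' calibrated loops_per_jiffy, as smp_boot_cpus()
	 * would sum over cpu_online_map. */
	unsigned long bogosum = 4004000 + 4004000;

	printf("Total of 2 processors activated (%lu.%02lu BogoMIPS).\n",
	       bogosum / (500000 / HZ),
	       (bogosum / (5000 / HZ)) % 100);
	return 0;
}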
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index 4d3e538c57ab..b695d70e998c 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -564,7 +564,8 @@ no_context:
 	 * it's allocated already.
 	 */
 	if ((page >> PAGE_SHIFT) < max_low_pfn
-	    && (page & _PAGE_PRESENT)) {
+	    && (page & _PAGE_PRESENT)
+	    && !(page & _PAGE_PSE)) {
 		page &= PAGE_MASK;
 		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
 							 & (PTRS_PER_PTE - 1)];
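The extra !(page & _PAGE_PSE) test keeps the oops-time page-table dump from chasing a 4MB-page PDE as if its address bits pointed at a PTE array; with PSE set, they point at the mapped data itself. A userspace model of the guarded decision follows; the bit values match i386's _PAGE_PRESENT and _PAGE_PSE.

#include <stdio.h>

#define _PAGE_PRESENT	0x001UL
#define _PAGE_PSE	0x080UL	/* 4MB page when set in a PDE */

/* Model: decide whether a page-directory entry may be followed
 * down to a PTE table, as the oops-time dumper must. */
static int pde_walkable(unsigned long pde)
{
	if (!(pde & _PAGE_PRESENT))
		return 0;	/* nothing mapped */
	if (pde & _PAGE_PSE)
		return 0;	/* large page: no PTE table to read */
	return 1;
}

int main(void)
{
	unsigned long small = 0x1000   | _PAGE_PRESENT;
	unsigned long large = 0x400000 | _PAGE_PRESENT | _PAGE_PSE;

	printf("small-page PDE walkable: %d\n", pde_walkable(small));
	printf("4MB-page PDE walkable:   %d\n", pde_walkable(large));
	return 0;
}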
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 5149ac136a5d..00be7f0a71b2 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -169,7 +169,7 @@ void dump_pagetable(unsigned long address)
 	pmd = pmd_offset(pud, address);
 	if (bad_address(pmd)) goto bad;
 	printk("PMD %lx ", pmd_val(*pmd));
-	if (!pmd_present(*pmd)) goto ret;
+	if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
 
 	pte = pte_offset_kernel(pmd, address);
 	if (bad_address(pte)) goto bad;
@@ -285,7 +285,6 @@ static int vmalloc_fault(unsigned long address)
 	return 0;
 }
 
-static int page_fault_trace;
 int show_unhandled_signals = 1;
 
 /*
@@ -354,10 +353,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	if (likely(regs->eflags & X86_EFLAGS_IF))
 		local_irq_enable();
 
-	if (unlikely(page_fault_trace))
-		printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
-		       regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
-
 	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(address, regs, error_code);
 
@@ -488,7 +483,7 @@ bad_area_nosemaphore:
 	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
 	    printk_ratelimit()) {
 		printk(
-		"%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
+		"%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n",
 			tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
 			tsk->comm, tsk->pid, address, regs->rip,
 			regs->rsp, error_code);
@@ -621,10 +616,3 @@ void vmalloc_sync_all(void)
 	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
 			(__START_KERNEL & PGDIR_MASK)));
 }
-
-static int __init enable_pagefaulttrace(char *str)
-{
-	page_fault_trace = 1;
-	return 1;
-}
-__setup("pagefaulttrace", enable_pagefaulttrace);
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 5eec5e56d07f..3d6926ba8995 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -612,7 +612,7 @@ void __init init_cpu_to_node(void)
 {
 	int i;
 	for (i = 0; i < NR_CPUS; i++) {
-		u8 apicid = x86_cpu_to_apicid[i];
+		u8 apicid = x86_cpu_to_apicid_init[i];
 		if (apicid == BAD_APICID)
 			continue;
 		if (apicid_to_node[apicid] == NUMA_NO_NODE)
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index c049ce414f01..0ed046a187f7 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -13,25 +13,45 @@
 #include <linux/mm.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
+#include <asm/stacktrace.h>
 
-struct frame_head {
-	struct frame_head * ebp;
-	unsigned long ret;
-} __attribute__((packed));
+static void backtrace_warning_symbol(void *data, char *msg,
+				     unsigned long symbol)
+{
+	/* Ignore warnings */
+}
 
-static struct frame_head *
-dump_kernel_backtrace(struct frame_head * head)
+static void backtrace_warning(void *data, char *msg)
 {
-	oprofile_add_trace(head->ret);
+	/* Ignore warnings */
+}
 
-	/* frame pointers should strictly progress back up the stack
-	 * (towards higher addresses) */
-	if (head >= head->ebp)
-		return NULL;
+static int backtrace_stack(void *data, char *name)
+{
+	/* Yes, we want all stacks */
+	return 0;
+}
+
+static void backtrace_address(void *data, unsigned long addr)
+{
+	unsigned int *depth = data;
 
-	return head->ebp;
+	if ((*depth)--)
+		oprofile_add_trace(addr);
 }
 
+static struct stacktrace_ops backtrace_ops = {
+	.warning = backtrace_warning,
+	.warning_symbol = backtrace_warning_symbol,
+	.stack = backtrace_stack,
+	.address = backtrace_address,
+};
+
+struct frame_head {
+	struct frame_head *ebp;
+	unsigned long ret;
+} __attribute__((packed));
+
 static struct frame_head *
 dump_user_backtrace(struct frame_head * head)
 {
@@ -53,72 +73,16 @@ dump_user_backtrace(struct frame_head * head)
 	return bufhead[0].ebp;
 }
 
-/*
- * |             | /\ Higher addresses
- * |             |
- * --------------- stack base (address of current_thread_info)
- * | thread info |
- * .             .
- * |    stack    |
- * --------------- saved regs->ebp value if valid (frame_head address)
- * .             .
- * --------------- saved regs->rsp value if x86_64
- * |             |
- * --------------- struct pt_regs * stored on stack if 32-bit
- * |             |
- * .             .
- * |             |
- * --------------- %esp
- * |             |
- * |             | \/ Lower addresses
- *
- * Thus, regs (or regs->rsp for x86_64) <-> stack base restricts the
- * valid(ish) ebp values. Note: (1) for x86_64, NMI and several other
- * exceptions use special stacks, maintained by the interrupt stack table
- * (IST). These stacks are set up in trap_init() in
- * arch/x86_64/kernel/traps.c. Thus, for x86_64, regs now does not point
- * to the kernel stack; instead, it points to some location on the NMI
- * stack. On the other hand, regs->rsp is the stack pointer saved when the
- * NMI occurred. (2) For 32-bit, regs->esp is not valid because the
- * processor does not save %esp on the kernel stack when interrupts occur
- * in the kernel mode.
- */
-#ifdef CONFIG_FRAME_POINTER
-static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
-{
-	unsigned long headaddr = (unsigned long)head;
-#ifdef CONFIG_X86_64
-	unsigned long stack = (unsigned long)regs->rsp;
-#else
-	unsigned long stack = (unsigned long)regs;
-#endif
-	unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE;
-
-	return headaddr > stack && headaddr < stack_base;
-}
-#else
-/* without fp, it's just junk */
-static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
-{
-	return 0;
-}
-#endif
-
-
 void
 x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
-	struct frame_head *head;
-
-#ifdef CONFIG_X86_64
-	head = (struct frame_head *)regs->rbp;
-#else
-	head = (struct frame_head *)regs->ebp;
-#endif
+	struct frame_head *head = (struct frame_head *)frame_pointer(regs);
+	unsigned long stack = stack_pointer(regs);
 
 	if (!user_mode_vm(regs)) {
-		while (depth-- && valid_kernel_stack(head, regs))
-			head = dump_kernel_backtrace(head);
+		if (depth)
+			dump_trace(NULL, regs, (unsigned long *)stack,
+				   &backtrace_ops, &depth);
 		return;
 	}
 
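This rewrite drops oprofile's private frame-pointer walker and its valid_kernel_stack() heuristics in favor of the shared dump_trace() walker: the walker owns traversal and validation, and consumers merely receive addresses through stacktrace_ops callbacks, with backtrace_address() counting the requested depth down. A userspace model of that callback contract follows; the ops struct is trimmed to the one callback the model needs, and a fixed array of fake return addresses stands in for a real stack.

#include <stdio.h>

struct stacktrace_ops {
	void (*address)(void *data, unsigned long addr);
};

/* Model of dump_trace(): the walker owns stack traversal and
 * validation; consumers only see addresses via the callback. */
static void dump_trace(const unsigned long *stack, int n,
		       const struct stacktrace_ops *ops, void *data)
{
	int i;

	for (i = 0; i < n; i++)
		ops->address(data, stack[i]);
}

/* oprofile-style consumer: record addresses until depth is spent. */
static void backtrace_address(void *data, unsigned long addr)
{
	unsigned int *depth = data;

	if ((*depth)--)
		printf("trace: %#lx\n", addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.address = backtrace_address,
};

int main(void)
{
	unsigned long fake_stack[] = { 0xc0101000, 0xc0102200, 0xc0103400 };
	unsigned int depth = 2;	/* only the first two are reported */

	dump_trace(fake_stack, 3, &backtrace_ops, &depth);
	return 0;
}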
diff --git a/arch/x86_64/.gitignore b/arch/x86_64/.gitignore
new file mode 100644
index 000000000000..36ef4c374d25
--- /dev/null
+++ b/arch/x86_64/.gitignore
@@ -0,0 +1 @@
+boot
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 25785b23df87..aab25f3ba3ce 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -723,7 +723,7 @@ config ARCH_HIBERNATION_HEADER
 
 source "drivers/acpi/Kconfig"
 
-source "arch/x86/kernel/cpufreq/Kconfig"
+source "arch/x86/kernel/cpu/cpufreq/Kconfig_64"
 
 source "drivers/cpuidle/Kconfig"
 
@@ -768,9 +768,9 @@ source "fs/Kconfig.binfmt"
 config IA32_EMULATION
 	bool "IA32 Emulation"
 	help
-	  Include code to run 32-bit programs under a 64-bit kernel. You should likely
-	  turn this on, unless you're 100% sure that you don't have any 32-bit programs
-	  left.
+	  Include code to run 32-bit programs under a 64-bit kernel. You should
+	  likely turn this on, unless you're 100% sure that you don't have any
+	  32-bit programs left.
 
 config IA32_AOUT
 	tristate "IA32 a.out support"
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 03e1ede27b85..6d89ab762ffc 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -74,7 +74,7 @@ KBUILD_CFLAGS += $(cflags-y)
 CFLAGS_KERNEL += $(cflags-kernel-y)
 KBUILD_AFLAGS += -m64
 
-head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task_64.o
+head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task.o
 
 libs-y += arch/x86/lib/
 core-y += arch/x86/kernel/ \
@@ -97,9 +97,9 @@ BOOTIMAGE := arch/x86/boot/bzImage
 KBUILD_IMAGE := $(BOOTIMAGE)
 
 bzImage: vmlinux
-	$(Q)mkdir -p $(objtree)/arch/x86_64/boot
-	$(Q)ln -fsn $(objtree)/arch/x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage
 	$(Q)$(MAKE) $(build)=$(boot) $(BOOTIMAGE)
+	$(Q)mkdir -p $(objtree)/arch/x86_64/boot
+	$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage
 
 bzlilo: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zlilo