author    Ingo Molnar <mingo@elte.hu>  2009-01-10 18:51:06 -0500
committer Ingo Molnar <mingo@elte.hu>  2009-01-10 18:51:06 -0500
commit    0811a433c61e85f895018239c4466a36311cd5de (patch)
tree      276933e518e5525d24ae37b02df2db9909679260 /arch/x86/kernel
parent    c299030765292434b73572f9bcfe84951ff06614 (diff)
parent    3d14bdad40315b54470cb7812293d14c8af2bf7d (diff)
Merge branch 'linus' into core/iommu
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/boot.c                       |  17
-rw-r--r--  arch/x86/kernel/acpi/cstate.c                     |  74
-rw-r--r--  arch/x86/kernel/acpi/sleep.c                      |   2
-rw-r--r--  arch/x86/kernel/apic.c                            |  14
-rw-r--r--  arch/x86/kernel/apm_32.c                          |   4
-rw-r--r--  arch/x86/kernel/cpu/common.c                      |  26
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c        | 170
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.c            |   2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c         |   8
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c         |   6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h         |   2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c  |  35
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c       |  18
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_32.c               |   2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c           |   2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p5.c                   |   2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p6.c                   |   2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/winchip.c              |   2
-rw-r--r--  arch/x86/kernel/e820.c                            |  21
-rw-r--r--  arch/x86/kernel/early-quirks.c                    |  22
-rw-r--r--  arch/x86/kernel/head_64.S                         |   2
-rw-r--r--  arch/x86/kernel/i8259.c                           |   8
-rw-r--r--  arch/x86/kernel/io_apic.c                         |   2
-rw-r--r--  arch/x86/kernel/ioport.c                          |   4
-rw-r--r--  arch/x86/kernel/irq.c                             |   2
-rw-r--r--  arch/x86/kernel/irq_32.c                          |  10
-rw-r--r--  arch/x86/kernel/irq_64.c                          |  22
-rw-r--r--  arch/x86/kernel/irqinit_32.c                      |  12
-rw-r--r--  arch/x86/kernel/irqinit_64.c                      |   8
-rw-r--r--  arch/x86/kernel/kprobes.c                         |   9
-rw-r--r--  arch/x86/kernel/mfgpt_32.c                        |   2
-rw-r--r--  arch/x86/kernel/mpparse.c                         | 350
-rw-r--r--  arch/x86/kernel/nmi.c                             |   1
-rw-r--r--  arch/x86/kernel/numaq_32.c                        |  38
-rw-r--r--  arch/x86/kernel/pci-dma.c                         |   2
-rw-r--r--  arch/x86/kernel/process_32.c                      |  19
-rw-r--r--  arch/x86/kernel/setup_percpu.c                    |  36
-rw-r--r--  arch/x86/kernel/smp.c                             |  19
-rw-r--r--  arch/x86/kernel/smpboot.c                         | 130
-rw-r--r--  arch/x86/kernel/time_32.c                         |   4
-rw-r--r--  arch/x86/kernel/time_64.c                         |   2
-rw-r--r--  arch/x86/kernel/traps.c                           |   5
-rw-r--r--  arch/x86/kernel/visws_quirks.c                    |  32
43 files changed, 610 insertions, 540 deletions
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 29dc0c89d4a..d37593c2f43 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -47,7 +47,7 @@
 #endif
 
 static int __initdata acpi_force = 0;
-
+u32 acpi_rsdt_forced;
 #ifdef CONFIG_ACPI
 int acpi_disabled = 0;
 #else
@@ -1374,6 +1374,17 @@ static void __init acpi_process_madt(void)
 				"Invalid BIOS MADT, disabling ACPI\n");
 			disable_acpi();
 		}
+	} else {
+		/*
+		 * ACPI found no MADT, and so ACPI wants UP PIC mode.
+		 * In the event an MPS table was found, forget it.
+		 * Boot with "acpi=off" to use MPS on such a system.
+		 */
+		if (smp_found_config) {
+			printk(KERN_WARNING PREFIX
+				"No APIC-table, disabling MPS\n");
+			smp_found_config = 0;
+		}
 	}
 
 	/*
@@ -1809,6 +1820,10 @@ static int __init parse_acpi(char *arg)
 		disable_acpi();
 		acpi_ht = 1;
 	}
+	/* acpi=rsdt use RSDT instead of XSDT */
+	else if (strcmp(arg, "rsdt") == 0) {
+		acpi_rsdt_forced = 1;
+	}
 	/* "acpi=noirq" disables ACPI interrupt routing */
 	else if (strcmp(arg, "noirq") == 0) {
 		acpi_noirq_set();
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index c2502eb9aa8..bbbe4bbb6f3 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -56,6 +56,7 @@ static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */
 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
 
 #define MWAIT_SUBSTATE_MASK	(0xf)
+#define MWAIT_CSTATE_MASK	(0xf)
 #define MWAIT_SUBSTATE_SIZE	(4)
 
 #define CPUID_MWAIT_LEAF (5)
@@ -66,39 +67,20 @@ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
 
 #define NATIVE_CSTATE_BEYOND_HALT	(2)
 
-int acpi_processor_ffh_cstate_probe(unsigned int cpu,
-		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
+static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
 {
-	struct cstate_entry *percpu_entry;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	cpumask_t saved_mask;
-	int retval;
+	struct acpi_processor_cx *cx = _cx;
+	long retval;
 	unsigned int eax, ebx, ecx, edx;
 	unsigned int edx_part;
 	unsigned int cstate_type; /* C-state type and not ACPI C-state type */
 	unsigned int num_cstate_subtype;
 
-	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF )
-		return -1;
-
-	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
-		return -1;
-
-	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
-	percpu_entry->states[cx->index].eax = 0;
-	percpu_entry->states[cx->index].ecx = 0;
-
-	/* Make sure we are running on right CPU */
-	saved_mask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (retval)
-		return -1;
-
 	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
 
 	/* Check whether this particular cx_type (in CST) is supported or not */
-	cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1;
+	cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
+			MWAIT_CSTATE_MASK) + 1;
 	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
 	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
 
@@ -114,21 +96,45 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 		retval = -1;
 		goto out;
 	}
-	percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
-
-	/* Use the hint in CST */
-	percpu_entry->states[cx->index].eax = cx->address;
 
 	if (!mwait_supported[cstate_type]) {
 		mwait_supported[cstate_type] = 1;
-		printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d "
-			"state\n", cx->type);
+		printk(KERN_DEBUG
+			"Monitor-Mwait will be used to enter C-%d "
+			"state\n", cx->type);
 	}
-	snprintf(cx->desc, ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
-		cx->address);
-
+	snprintf(cx->desc,
+			ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
+			cx->address);
 out:
-	set_cpus_allowed_ptr(current, &saved_mask);
+	return retval;
+}
+
+int acpi_processor_ffh_cstate_probe(unsigned int cpu,
+		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
+{
+	struct cstate_entry *percpu_entry;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	long retval;
+
+	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
+		return -1;
+
+	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
+		return -1;
+
+	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
+	percpu_entry->states[cx->index].eax = 0;
+	percpu_entry->states[cx->index].ecx = 0;
+
+	/* Make sure we are running on right CPU */
+
+	retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
+	if (retval == 0) {
+		/* Use the hint in CST */
+		percpu_entry->states[cx->index].eax = cx->address;
+		percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
	}
 	return retval;
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
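
The cstate.c conversion above is the pattern this merge applies throughout arch/x86: rather than saving current->cpus_allowed and migrating the calling task with set_cpus_allowed_ptr() to touch per-CPU state, the per-CPU body is factored into a long fn(void *) callback and executed on the target CPU with work_on_cpu(). A minimal sketch of the idiom, assuming a v2.6.29-era tree where work_on_cpu() is available; the example_* names are hypothetical and not part of the patch:

	#include <linux/workqueue.h>	/* work_on_cpu() */
	#include <asm/processor.h>	/* cpuid() */

	/* Hypothetical callback: work_on_cpu() executes this on the target CPU. */
	static long example_read_mwait_leaf(void *unused)
	{
		unsigned int eax, ebx, ecx, edx;

		/* Already running on the right CPU: no affinity save/restore. */
		cpuid(5, &eax, &ebx, &ecx, &edx);
		return edx;	/* MWAIT sub-state support bits */
	}

	static long example_probe(unsigned int cpu)
	{
		/* Queue the callback on cpu's workqueue and wait for its return value. */
		return work_on_cpu(cpu, example_read_mwait_leaf, NULL);
	}

Unlike the set_cpus_allowed_ptr() approach, the caller's affinity mask is never modified, so a concurrent sched_setaffinity() cannot be clobbered.
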
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 806b4e9051b..707c1f6f95f 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -159,6 +159,8 @@ static int __init acpi_sleep_setup(char *str)
 #endif
 	if (strncmp(str, "old_ordering", 12) == 0)
 		acpi_old_suspend_ordering();
+	if (strncmp(str, "s4_nonvs", 8) == 0)
+		acpi_s4_no_nvs();
 	str = strchr(str, ',');
 	if (str != NULL)
 		str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index b13d3c4dbd4..566a08466b1 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -31,9 +31,11 @@
 #include <linux/dmi.h>
 #include <linux/dmar.h>
 #include <linux/ftrace.h>
+#include <linux/smp.h>
+#include <linux/nmi.h>
+#include <linux/timex.h>
 
 #include <asm/atomic.h>
-#include <asm/smp.h>
 #include <asm/mtrr.h>
 #include <asm/mpspec.h>
 #include <asm/desc.h>
@@ -41,10 +43,8 @@
 #include <asm/hpet.h>
 #include <asm/pgalloc.h>
 #include <asm/i8253.h>
-#include <asm/nmi.h>
 #include <asm/idle.h>
 #include <asm/proto.h>
-#include <asm/timex.h>
 #include <asm/apic.h>
 #include <asm/i8259.h>
 
@@ -687,7 +687,7 @@ static int __init calibrate_APIC_clock(void)
 	local_irq_enable();
 
 	if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
-		pr_warning("APIC timer disabled due to verification failure.\n");
+		pr_warning("APIC timer disabled due to verification failure\n");
 		return -1;
 	}
 
@@ -2087,14 +2087,12 @@ __cpuinit int apic_is_clustered_box(void)
 		/* are we being called early in kernel startup? */
 		if (bios_cpu_apicid) {
 			id = bios_cpu_apicid[i];
-		}
-		else if (i < nr_cpu_ids) {
+		} else if (i < nr_cpu_ids) {
 			if (cpu_present(i))
 				id = per_cpu(x86_bios_cpu_apicid, i);
 			else
 				continue;
-		}
-		else
+		} else
 			break;
 
 		if (id != BAD_APICID)
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 3a26525a3f3..98807bb095a 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -160,9 +160,9 @@
  * Work around byte swap bug in one of the Vaio's BIOS's
  * (Marc Boucher <marc@mbsi.ca>).
  * Exposed the disable flag to dmi so that we can handle known
- * broken APM (Alan Cox <alan@redhat.com>).
+ * broken APM (Alan Cox <alan@lxorguk.ukuu.org.uk>).
  * 1.14ac: If the BIOS says "I slowed the CPU down" then don't spin
- * calling it - instead idle. (Alan Cox <alan@redhat.com>)
+ * calling it - instead idle. (Alan Cox <alan@lxorguk.ukuu.org.uk>)
  * If an APM idle fails log it and idle sensibly
  * 1.15: Don't queue events to clients who open the device O_WRONLY.
  * Don't expect replies from clients who open the device O_RDONLY.
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3f95a40f718..83492b1f93b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -40,6 +40,26 @@
 
 #include "cpu.h"
 
+#ifdef CONFIG_X86_64
+
+/* all of these masks are initialized in setup_cpu_local_masks() */
+cpumask_var_t cpu_callin_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_initialized_mask;
+
+/* representing cpus for which sibling maps can be computed */
+cpumask_var_t cpu_sibling_setup_mask;
+
+#else /* CONFIG_X86_32 */
+
+cpumask_t cpu_callin_map;
+cpumask_t cpu_callout_map;
+cpumask_t cpu_initialized;
+cpumask_t cpu_sibling_setup_map;
+
+#endif /* CONFIG_X86_32 */
+
+
 static struct cpu_dev *this_cpu __cpuinitdata;
 
 #ifdef CONFIG_X86_64
@@ -856,8 +876,6 @@ static __init int setup_disablecpuid(char *arg)
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
-cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-
 #ifdef CONFIG_X86_64
 struct x8664_pda **_cpu_pda __read_mostly;
 EXPORT_SYMBOL(_cpu_pda);
@@ -976,7 +994,7 @@ void __cpuinit cpu_init(void)
 
 	me = current;
 
-	if (cpu_test_and_set(cpu, cpu_initialized))
+	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
 		panic("CPU#%d already initialized!\n", cpu);
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1085,7 +1103,7 @@ void __cpuinit cpu_init(void)
 	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &curr->thread;
 
-	if (cpu_test_and_set(cpu, cpu_initialized)) {
+	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
 		for (;;) local_irq_enable();
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 28102ad1a36..06fcd8f9323 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -145,13 +145,14 @@ typedef union {
 
 struct drv_cmd {
 	unsigned int type;
-	cpumask_t mask;
+	cpumask_var_t mask;
 	drv_addr_union addr;
 	u32 val;
 };
 
-static void do_drv_read(struct drv_cmd *cmd)
+static long do_drv_read(void *_cmd)
 {
+	struct drv_cmd *cmd = _cmd;
 	u32 h;
 
 	switch (cmd->type) {
@@ -166,10 +167,12 @@ static void do_drv_read(struct drv_cmd *cmd)
 	default:
 		break;
 	}
+	return 0;
 }
 
-static void do_drv_write(struct drv_cmd *cmd)
+static long do_drv_write(void *_cmd)
 {
+	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
 
 	switch (cmd->type) {
@@ -186,48 +189,41 @@ static void do_drv_write(struct drv_cmd *cmd)
 	default:
 		break;
 	}
+	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
 	cmd->val = 0;
 
-	set_cpus_allowed_ptr(current, &cmd->mask);
-	do_drv_read(cmd);
-	set_cpus_allowed_ptr(current, &saved_mask);
+	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
 	unsigned int i;
 
-	for_each_cpu_mask_nr(i, cmd->mask) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
-		do_drv_write(cmd);
+	for_each_cpu(i, cmd->mask) {
+		work_on_cpu(i, do_drv_write, cmd);
 	}
-
-	set_cpus_allowed_ptr(current, &saved_mask);
-	return;
 }
 
-static u32 get_cur_val(const cpumask_t *mask)
+static u32 get_cur_val(const struct cpumask *mask)
 {
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
 
-	if (unlikely(cpus_empty(*mask)))
+	if (unlikely(cpumask_empty(mask)))
 		return 0;
 
-	switch (per_cpu(drv_data, first_cpu(*mask))->cpu_feature) {
+	switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(drv_data, first_cpu(*mask))->acpi_data;
+		perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -235,15 +231,44 @@ static u32 get_cur_val(const cpumask_t *mask)
 		return 0;
 	}
 
-	cmd.mask = *mask;
+	if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
+		return 0;
+
+	cpumask_copy(cmd.mask, mask);
 
 	drv_read(&cmd);
 
+	free_cpumask_var(cmd.mask);
+
 	dprintk("get_cur_val = %u\n", cmd.val);
 
 	return cmd.val;
 }
 
+struct perf_cur {
+	union {
+		struct {
+			u32 lo;
+			u32 hi;
+		} split;
+		u64 whole;
+	} aperf_cur, mperf_cur;
+};
+
+
+static long read_measured_perf_ctrs(void *_cur)
+{
+	struct perf_cur *cur = _cur;
+
+	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
+
+	wrmsr(MSR_IA32_APERF, 0, 0);
+	wrmsr(MSR_IA32_MPERF, 0, 0);
+
+	return 0;
+}
+
 /*
  * Return the measured active (C0) frequency on this CPU since last call
  * to this function.
@@ -260,31 +285,12 @@ static u32 get_cur_val(const cpumask_t *mask)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	union {
-		struct {
-			u32 lo;
-			u32 hi;
-		} split;
-		u64 whole;
-	} aperf_cur, mperf_cur;
-
-	cpumask_t saved_mask;
+	struct perf_cur cur;
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (get_cpu() != cpu) {
-		/* We were not able to run on requested processor */
-		put_cpu();
+	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
 		return 0;
-	}
-
-	rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
-	rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
-
-	wrmsr(MSR_IA32_APERF, 0,0);
-	wrmsr(MSR_IA32_MPERF, 0,0);
 
 #ifdef __i386__
 	/*
@@ -292,37 +298,39 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 	 * Get an approximate value. Return failure in case we cannot get
 	 * an approximate value.
 	 */
-	if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
+	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
 		int shift_count;
 		u32 h;
 
-		h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
+		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
 		shift_count = fls(h);
 
-		aperf_cur.whole >>= shift_count;
-		mperf_cur.whole >>= shift_count;
+		cur.aperf_cur.whole >>= shift_count;
+		cur.mperf_cur.whole >>= shift_count;
 	}
 
-	if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
 		int shift_count = 7;
-		aperf_cur.split.lo >>= shift_count;
-		mperf_cur.split.lo >>= shift_count;
+		cur.aperf_cur.split.lo >>= shift_count;
+		cur.mperf_cur.split.lo >>= shift_count;
 	}
 
-	if (aperf_cur.split.lo && mperf_cur.split.lo)
-		perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
+	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
+		perf_percent = (cur.aperf_cur.split.lo * 100) /
+				cur.mperf_cur.split.lo;
 	else
 		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
 		int shift_count = 7;
-		aperf_cur.whole >>= shift_count;
-		mperf_cur.whole >>= shift_count;
+		cur.aperf_cur.whole >>= shift_count;
+		cur.mperf_cur.whole >>= shift_count;
 	}
 
-	if (aperf_cur.whole && mperf_cur.whole)
-		perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
+	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
+		perf_percent = (cur.aperf_cur.whole * 100) /
+				cur.mperf_cur.whole;
 	else
 		perf_percent = 0;
 
@@ -330,10 +338,6 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 
 	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
 
-	put_cpu();
-	set_cpus_allowed_ptr(current, &saved_mask);
-
-	dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
 	return retval;
 }
 
@@ -351,7 +355,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	}
 
 	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.
@@ -386,7 +390,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
 	struct acpi_processor_performance *perf;
 	struct cpufreq_freqs freqs;
-	cpumask_t online_policy_cpus;
 	struct drv_cmd cmd;
 	unsigned int next_state = 0; /* Index into freq_table */
 	unsigned int next_perf_state = 0; /* Index into perf table */
@@ -401,20 +404,18 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		return -ENODEV;
 	}
 
+	if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
+		return -ENOMEM;
+
 	perf = data->acpi_data;
 	result = cpufreq_frequency_table_target(policy,
 						data->freq_table,
 						target_freq,
 						relation, &next_state);
-	if (unlikely(result))
-		return -ENODEV;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	/* cpufreq holds the hotplug lock, so we are safe from here on */
-	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
-#else
-	online_policy_cpus = policy->cpus;
-#endif
+	if (unlikely(result)) {
+		result = -ENODEV;
+		goto out;
+	}
 
 	next_perf_state = data->freq_table[next_state].index;
 	if (perf->state == next_perf_state) {
@@ -425,7 +426,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	} else {
 		dprintk("Already at target state (P%d)\n",
 			next_perf_state);
-		return 0;
+		goto out;
 	}
 	}
 
@@ -444,19 +445,19 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		cmd.val = (u32) perf->states[next_perf_state].control;
 		break;
 	default:
-		return -ENODEV;
+		result = -ENODEV;
+		goto out;
 	}
 
-	cpus_clear(cmd.mask);
-
+	/* cpufreq holds the hotplug lock, so we are safe from here on */
 	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-		cmd.mask = online_policy_cpus;
+		cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
 	else
-		cpu_set(policy->cpu, cmd.mask);
+		cpumask_copy(cmd.mask, cpumask_of(policy->cpu));
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
-	for_each_cpu_mask_nr(i, cmd.mask) {
+	for_each_cpu(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -464,19 +465,22 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	drv_write(&cmd);
 
 	if (acpi_pstate_strict) {
-		if (!check_freqs(&cmd.mask, freqs.new, data)) {
+		if (!check_freqs(cmd.mask, freqs.new, data)) {
 			dprintk("acpi_cpufreq_target failed (%d)\n",
 				policy->cpu);
-			return -EAGAIN;
+			result = -EAGAIN;
+			goto out;
 		}
 	}
 
-	for_each_cpu_mask_nr(i, cmd.mask) {
+	for_each_cpu(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
 	perf->state = next_perf_state;
 
+out:
+	free_cpumask_var(cmd.mask);
 	return result;
 }
 
@@ -626,15 +630,15 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	 */
 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
 	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		cpumask_copy(&policy->cpus, perf->shared_cpu_map);
+		cpumask_copy(policy->cpus, perf->shared_cpu_map);
 	}
-	cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
+	cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
 
 #ifdef CONFIG_SMP
 	dmi_check_system(sw_any_bug_dmi_table);
-	if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
+	if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
 		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-		policy->cpus = per_cpu(cpu_core_map, cpu);
+		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
 	}
 #endif
 
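
The acpi-cpufreq hunks also show the second recurring conversion in this merge: struct-embedded and on-stack cpumask_t values become cpumask_var_t, which is a pointer-backed allocation when CONFIG_CPUMASK_OFFSTACK=y and must be paired with free_cpumask_var() on every exit path (hence the new out: labels above). A small sketch of the idiom under those assumptions; example_count_online() is a hypothetical helper, not part of the patch:

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	/* Hypothetical helper showing the alloc/copy/operate/free pattern. */
	static int example_count_online(const struct cpumask *in)
	{
		cpumask_var_t tmp;
		int n;

		/* Allocates storage when CONFIG_CPUMASK_OFFSTACK=y; cheap otherwise. */
		if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
			return -ENOMEM;

		cpumask_and(tmp, in, cpu_online_mask);	/* pointer-based cpumask API */
		n = cpumask_weight(tmp);

		free_cpumask_var(tmp);	/* must pair with every successful alloc */
		return n;
	}
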
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index b0461856acf..a4cff5d6e38 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -982,7 +982,7 @@ static int __init longhaul_init(void)
 	case 10:
 		printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
 	default:
-		;;
+		;
 	}
 
 	return -ENODEV;
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index beea4466b06..b585e04cbc9 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 		return 0;
 
 	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 	/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
 	 * Developer's Manual, Volume 3
 	 */
-	for_each_cpu_mask_nr(i, policy->cpus)
+	for_each_cpu(i, policy->cpus)
 		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
 	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -203,7 +203,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	unsigned int i;
 
 #ifdef CONFIG_SMP
-	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
 #endif
 
 	/* Errata workaround */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index c3c9adbaa26..5c28b37dea1 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1199,10 +1199,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	set_cpus_allowed_ptr(current, &oldmask);
 
 	if (cpu_family == CPU_HW_PSTATE)
-		pol->cpus = cpumask_of_cpu(pol->cpu);
+		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
 	else
-		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
-	data->available_cores = &(pol->cpus);
+		cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
+	data->available_cores = pol->cpus;
 
 	/* Take a crude guess here.
 	 * That guess was in microseconds, so multiply with 1000 */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index 65cfb5d7f77..8ecc75b6c7c 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -53,7 +53,7 @@ struct powernow_k8_data {
 	/* we need to keep track of associated cores, but let cpufreq
 	 * handle hotplug events - so just point at cpufreq pol->cpus
 	 * structure */
-	cpumask_t *available_cores;
+	struct cpumask *available_cores;
 };
 
 
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index f0ea6fa2f53..f08998278a3 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -458,11 +458,6 @@ static int centrino_verify (struct cpufreq_policy *policy)
  *
  * Sets a new CPUFreq policy.
  */
-struct allmasks {
-	cpumask_t	saved_mask;
-	cpumask_t	covered_cpus;
-};
-
 static int centrino_target (struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
@@ -472,12 +467,15 @@ static int centrino_target (struct cpufreq_policy *policy,
 	struct cpufreq_freqs	freqs;
 	int			retval = 0;
 	unsigned int		j, k, first_cpu, tmp;
-	CPUMASK_ALLOC(allmasks);
-	CPUMASK_PTR(saved_mask, allmasks);
-	CPUMASK_PTR(covered_cpus, allmasks);
+	cpumask_var_t saved_mask, covered_cpus;
 
-	if (unlikely(allmasks == NULL))
+	if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
 		return -ENOMEM;
+	if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
+		free_cpumask_var(saved_mask);
+		return -ENOMEM;
+	}
+	cpumask_copy(saved_mask, &current->cpus_allowed);
 
 	if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
 		retval = -ENODEV;
@@ -493,11 +491,9 @@ static int centrino_target (struct cpufreq_policy *policy,
 		goto out;
 	}
 
-	*saved_mask = current->cpus_allowed;
 	first_cpu = 1;
-	cpus_clear(*covered_cpus);
-	for_each_cpu_mask_nr(j, policy->cpus) {
-		const cpumask_t *mask;
+	for_each_cpu(j, policy->cpus) {
+		const struct cpumask *mask;
 
 		/* cpufreq holds the hotplug lock, so we are safe here */
 		if (!cpu_online(j))
@@ -508,9 +504,9 @@ static int centrino_target (struct cpufreq_policy *policy,
 		 * Make sure we are running on CPU that wants to change freq
 		 */
 		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			mask = &policy->cpus;
+			mask = policy->cpus;
 		else
-			mask = &cpumask_of_cpu(j);
+			mask = cpumask_of(j);
 
 		set_cpus_allowed_ptr(current, mask);
 		preempt_disable();
@@ -542,7 +538,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
 			target_freq, freqs.old, freqs.new, msr);
 
-		for_each_cpu_mask_nr(k, policy->cpus) {
+		for_each_cpu(k, policy->cpus) {
 			if (!cpu_online(k))
 				continue;
 			freqs.cpu = k;
@@ -567,7 +563,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		preempt_enable();
 	}
 
-	for_each_cpu_mask_nr(k, policy->cpus) {
+	for_each_cpu(k, policy->cpus) {
 		if (!cpu_online(k))
 			continue;
 		freqs.cpu = k;
@@ -590,7 +586,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		tmp = freqs.new;
 		freqs.new = freqs.old;
 		freqs.old = tmp;
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu(j, policy->cpus) {
 			if (!cpu_online(j))
 				continue;
 			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
@@ -605,7 +601,8 @@ migrate_end:
 	preempt_enable();
 	set_cpus_allowed_ptr(current, saved_mask);
 out:
-	CPUMASK_FREE(allmasks);
+	free_cpumask_var(saved_mask);
+	free_cpumask_var(covered_cpus);
 	return retval;
 }
 
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 04d0376b64b..dedc1e98f16 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -229,7 +229,7 @@ static unsigned int speedstep_detect_chipset (void)
 	return 0;
 }
 
-static unsigned int _speedstep_get(const cpumask_t *cpus)
+static unsigned int _speedstep_get(const struct cpumask *cpus)
 {
 	unsigned int speed;
 	cpumask_t cpus_allowed;
@@ -244,7 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	return _speedstep_get(&cpumask_of_cpu(cpu));
+	return _speedstep_get(cpumask_of(cpu));
 }
 
 /**
@@ -267,7 +267,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
 		return -EINVAL;
 
-	freqs.old = _speedstep_get(&policy->cpus);
+	freqs.old = _speedstep_get(policy->cpus);
 	freqs.new = speedstep_freqs[newstate].frequency;
 	freqs.cpu = policy->cpu;
 
@@ -279,20 +279,20 @@ static int speedstep_target (struct cpufreq_policy *policy,
 
 	cpus_allowed = current->cpus_allowed;
 
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
 
 	/* switch to physical CPU where state is to be changed */
-	set_cpus_allowed_ptr(current, &policy->cpus);
+	set_cpus_allowed_ptr(current, policy->cpus);
 
 	speedstep_set_state(newstate);
 
 	/* allow to be run on all CPUs */
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -322,11 +322,11 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
 	/* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
 #endif
 
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &policy->cpus);
+	set_cpus_allowed_ptr(current, policy->cpus);
 
 	/* detect low and high frequency and transition latency */
 	result = speedstep_get_freqs(speedstep_processor,
@@ -339,7 +339,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 		return result;
 
 	/* get current speed setting */
-	speed = _speedstep_get(&policy->cpus);
+	speed = _speedstep_get(policy->cpus);
 	if (!speed)
 		return -EIO;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index 0ebf3fc6a61..dfaebce3633 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
@@ -1,6 +1,6 @@
 /*
  * mce.c - x86 Machine Check Exception Reporting
- * (c) 2002 Alan Cox <alan@redhat.com>, Dave Jones <davej@redhat.com>
+ * (c) 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>, Dave Jones <davej@redhat.com>
  */
 
 #include <linux/init.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index a5a5e053037..8ae8c4ff094 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -462,7 +462,7 @@ out_free:
 	return err;
 }
 
-static long local_allocate_threshold_blocks(void *_bank)
+static __cpuinit long local_allocate_threshold_blocks(void *_bank)
 {
 	unsigned int *bank = _bank;
 
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index bfa5817afdd..c9f77ea69ed 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -1,6 +1,6 @@
 /*
  * P5 specific Machine Check Exception Reporting
- * (C) Copyright 2002 Alan Cox <alan@redhat.com>
+ * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>
  */
 
 #include <linux/init.h>
diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c
index 62efc9c2b3a..2ac52d7b434 100644
--- a/arch/x86/kernel/cpu/mcheck/p6.c
+++ b/arch/x86/kernel/cpu/mcheck/p6.c
@@ -1,6 +1,6 @@
 /*
  * P6 specific Machine Check Exception Reporting
- * (C) Copyright 2002 Alan Cox <alan@redhat.com>
+ * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>
  */
 
 #include <linux/init.h>
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index f2be3e190c6..2a043d89811 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -1,6 +1,6 @@
 /*
  * IDT Winchip specific Machine Check Exception Reporting
- * (C) Copyright 2002 Alan Cox <alan@redhat.com>
+ * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>
  */
 
 #include <linux/init.h>
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 65a13943e09..e85826829cf 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -665,6 +665,27 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn)
 }
 #endif
 
+#ifdef CONFIG_HIBERNATION
+/**
+ * Mark ACPI NVS memory region, so that we can save/restore it during
+ * hibernation and the subsequent resume.
+ */
+static int __init e820_mark_nvs_memory(void)
+{
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+
+		if (ei->type == E820_NVS)
+			hibernate_nvs_register(ei->addr, ei->size);
+	}
+
+	return 0;
+}
+core_initcall(e820_mark_nvs_memory);
+#endif
+
 /*
  * Early reserved memory areas.
  */
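
The new e820_mark_nvs_memory() above hooks into the initcall machinery: core_initcall() registers a function that runs once during boot, after setup_arch() has finalized the e820 map but before ordinary device initcalls. The registration shape, reduced to a hedged sketch with a hypothetical function name:

	#include <linux/init.h>

	/* Hypothetical initcall: walk a firmware table once at boot. */
	static int __init example_scan_firmware_table(void)
	{
		/* iterate entries here, as e820_mark_nvs_memory() iterates e820.map */
		return 0;	/* an initcall returns 0 on success */
	}
	core_initcall(example_scan_firmware_table);
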
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 744aa7fc49d..76b8cd953de 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -201,6 +201,12 @@ struct chipset {
 	void (*f)(int num, int slot, int func);
 };
 
+/*
+ * Only works for devices on the root bus. If you add any devices
+ * not on bus 0 readd another loop level in early_quirks(). But
+ * be careful because at least the Nvidia quirk here relies on
+ * only matching on bus 0.
+ */
 static struct chipset early_qrk[] __initdata = {
 	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
 	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
@@ -267,17 +273,17 @@ static int __init check_dev_quirk(int num, int slot, int func)
 
 void __init early_quirks(void)
 {
-	int num, slot, func;
+	int slot, func;
 
 	if (!early_pci_allowed())
 		return;
 
 	/* Poor man's PCI discovery */
-	for (num = 0; num < 32; num++)
+	/* Only scan the root bus */
 	for (slot = 0; slot < 32; slot++)
 		for (func = 0; func < 8; func++) {
 			/* Only probe function 0 on single fn devices */
-			if (check_dev_quirk(num, slot, func))
+			if (check_dev_quirk(0, slot, func))
 				break;
 		}
 }
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 26cfdc1d7c7..0e275d49556 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -305,7 +305,7 @@ ENTRY(early_idt_handler)
 	call dump_stack
 #ifdef CONFIG_KALLSYMS
 	leaq early_idt_ripmsg(%rip),%rdi
-	movq 8(%rsp),%rsi	# get rip again
+	movq 0(%rsp),%rsi	# get rip again
 	call __print_symbol
 #endif
 #endif /* EARLY_PRINTK */
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 4b8a53d841f..11d5093eb28 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -11,15 +11,15 @@
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
 #include <linux/bitops.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/delay.h>
 
-#include <asm/acpi.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/timer.h>
 #include <asm/hw_irq.h>
 #include <asm/pgtable.h>
-#include <asm/delay.h>
 #include <asm/desc.h>
 #include <asm/apic.h>
 #include <asm/arch_hooks.h>
@@ -323,7 +323,7 @@ void init_8259A(int auto_eoi)
 	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
 
 	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64,
 	   to 0x20-0x27 on i386 */
 	outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
 
 	/* 8259A-1 (the master) has a slave on IR2 */
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 3639442aa7a..1c4a1302536 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -129,7 +129,6 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
 	node = cpu_to_node(cpu);
 
 	pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
-	printk(KERN_DEBUG "  alloc irq_2_pin on cpu %d node %d\n", cpu, node);
 
 	return pin;
 }
@@ -227,7 +226,6 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
 			cpumask_clear(cfg->old_domain);
 		}
 	}
-	printk(KERN_DEBUG "  alloc irq_cfg on cpu %d node %d\n", cpu, node);
 
 	return cfg;
 }
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 19191430274..b12208f4dfe 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -35,8 +35,8 @@ static void set_bitmap(unsigned long *bitmap, unsigned int base,
  */
 asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 {
-	struct thread_struct * t = &current->thread;
-	struct tss_struct * tss;
+	struct thread_struct *t = &current->thread;
+	struct tss_struct *tss;
 	unsigned int i, max_long, bytes, bytes_updated;
 
 	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index bce53e1352a..3973e2df7f8 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -5,10 +5,10 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
+#include <linux/smp.h>
 
 #include <asm/apic.h>
 #include <asm/io_apic.h>
-#include <asm/smp.h>
 #include <asm/irq.h>
 
 atomic_t irq_err_count;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 9dc5588f336..74b9ff7341e 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -15,9 +15,9 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
+#include <linux/uaccess.h>
 
 #include <asm/apic.h>
-#include <asm/uaccess.h>
 
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -93,7 +93,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 		return 0;
 
 	/* build the stack frame on the IRQ stack */
-	isp = (u32 *) ((char*)irqctx + sizeof(*irqctx));
+	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
 	irqctx->tinfo.task = curctx->tinfo.task;
 	irqctx->tinfo.previous_esp = current_stack_pointer;
 
@@ -137,7 +137,7 @@ void __cpuinit irq_ctx_init(int cpu)
 
 	hardirq_ctx[cpu] = irqctx;
 
-	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
+	irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE];
 	irqctx->tinfo.task = NULL;
 	irqctx->tinfo.exec_domain = NULL;
 	irqctx->tinfo.cpu = cpu;
@@ -147,7 +147,7 @@ void __cpuinit irq_ctx_init(int cpu)
 	softirq_ctx[cpu] = irqctx;
 
 	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-	       cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
+	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
 }
 
 void irq_ctx_exit(int cpu)
@@ -174,7 +174,7 @@ asmlinkage void do_softirq(void)
 	irqctx->tinfo.previous_esp = current_stack_pointer;
 
 	/* build the stack frame on the softirq stack */
-	isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
+	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
 
 	call_on_stack(__do_softirq, isp);
 	/*
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 6383d50f82e..63c88e6ec02 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -14,10 +14,10 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/ftrace.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/smp.h>
 #include <asm/io_apic.h>
 #include <asm/idle.h>
-#include <asm/smp.h>
 
 /*
  * Probabilistic stack overflow check:
@@ -142,18 +142,18 @@ extern void call_softirq(void);
 
 asmlinkage void do_softirq(void)
 {
 	__u32 pending;
 	unsigned long flags;
 
 	if (in_interrupt())
 		return;
 
 	local_irq_save(flags);
 	pending = local_softirq_pending();
 	/* Switch to interrupt stack */
 	if (pending) {
 		call_softirq();
 		WARN_ON_ONCE(softirq_count());
 	}
 	local_irq_restore(flags);
 }
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 84723295f88..1507ad4e674 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -9,18 +9,18 @@
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
 #include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/delay.h>
 
 #include <asm/atomic.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/timer.h>
 #include <asm/pgtable.h>
-#include <asm/delay.h>
 #include <asm/desc.h>
 #include <asm/apic.h>
 #include <asm/arch_hooks.h>
 #include <asm/i8259.h>
-
+#include <asm/traps.h>
 
 
 /*
@@ -34,12 +34,10 @@
  * leads to races. IBM designers who came up with it should
  * be shot.
  */
-
 
 static irqreturn_t math_error_irq(int cpl, void *dev_id)
 {
-	extern void math_error(void __user *);
-	outb(0,0xF0);
+	outb(0, 0xF0);
 	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
 		return IRQ_NONE;
 	math_error((void __user *)get_irq_regs()->ip);
@@ -56,7 +54,7 @@ static struct irqaction fpu_irq = {
 	.name = "fpu",
 };
 
-void __init init_ISA_irqs (void)
+void __init init_ISA_irqs(void)
 {
 	int i;
 
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 31ebfe38e96..da481a1e3f3 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -11,14 +11,14 @@
11#include <linux/kernel_stat.h> 11#include <linux/kernel_stat.h>
12#include <linux/sysdev.h> 12#include <linux/sysdev.h>
13#include <linux/bitops.h> 13#include <linux/bitops.h>
14#include <linux/acpi.h>
15#include <linux/io.h>
16#include <linux/delay.h>
14 17
15#include <asm/acpi.h>
16#include <asm/atomic.h> 18#include <asm/atomic.h>
17#include <asm/system.h> 19#include <asm/system.h>
18#include <asm/io.h>
19#include <asm/hw_irq.h> 20#include <asm/hw_irq.h>
20#include <asm/pgtable.h> 21#include <asm/pgtable.h>
21#include <asm/delay.h>
22#include <asm/desc.h> 22#include <asm/desc.h>
23#include <asm/apic.h> 23#include <asm/apic.h>
24#include <asm/i8259.h> 24#include <asm/i8259.h>
@@ -81,7 +81,7 @@ int vector_used_by_percpu_irq(unsigned int vector)
81 return 0; 81 return 0;
82} 82}
83 83
84void __init init_ISA_irqs(void) 84static void __init init_ISA_irqs(void)
85{ 85{
86 int i; 86 int i;
87 87
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 6c27679ec6a..884d985b8b8 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -376,9 +376,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
376 376
377void __kprobes arch_remove_kprobe(struct kprobe *p) 377void __kprobes arch_remove_kprobe(struct kprobe *p)
378{ 378{
379 mutex_lock(&kprobe_mutex); 379 if (p->ainsn.insn) {
380 free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); 380 free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
381 mutex_unlock(&kprobe_mutex); 381 p->ainsn.insn = NULL;
382 }
382} 383}
383 384
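
[Annotation] The arch_remove_kprobe() change above does two things: the kprobe_mutex lock/unlock pair disappears (locking presumably moves to the caller in this series), and NULL-ing ainsn.insn makes teardown idempotent. A generic sketch of that guard-free-NULL pattern, with kfree() standing in for free_insn_slot() and a hypothetical resource type:

struct example_probe {
	void *slot;		/* hypothetical resource */
};

static void example_remove(struct example_probe *p)
{
	if (p->slot) {		/* safe to call repeatedly */
		kfree(p->slot);
		p->slot = NULL;	/* a second call becomes a no-op */
	}
}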
384static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) 385static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -694,7 +695,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
694 /* 695 /*
695 * It is possible to have multiple instances associated with a given 696 * It is possible to have multiple instances associated with a given
696 * task either because multiple functions in the call path have 697 * task either because multiple functions in the call path have
697 * return probes installed on them, and/or more then one 698 * return probes installed on them, and/or more than one
698 * return probe was registered for a target function. 699 * return probe was registered for a target function.
699 * 700 *
700 * We can handle this because: 701 * We can handle this because:
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index c12314c9e86..8815f3c7fec 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -252,7 +252,7 @@ EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer);
252/* 252/*
253 * The MFPGT timers on the CS5536 provide us with suitable timers to use 253 * The MFPGT timers on the CS5536 provide us with suitable timers to use
254 * as clock event sources - not as good as a HPET or APIC, but certainly 254 * as clock event sources - not as good as a HPET or APIC, but certainly
255 * better then the PIT. This isn't a general purpose MFGPT driver, but 255 * better than the PIT. This isn't a general purpose MFGPT driver, but
256 * a simplified one designed specifically to act as a clock event source. 256 * a simplified one designed specifically to act as a clock event source.
257 * For full details about the MFGPT, please consult the CS5536 data sheet. 257 * For full details about the MFGPT, please consult the CS5536 data sheet.
258 */ 258 */
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index c5c5b8df1db..c0601c2848a 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -2,7 +2,7 @@
2 * Intel Multiprocessor Specification 1.1 and 1.4 2 * Intel Multiprocessor Specification 1.1 and 1.4
3 * compliant MP-table parsing routines. 3 * compliant MP-table parsing routines.
4 * 4 *
5 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> 5 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> 6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
7 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de> 7 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
8 */ 8 */
@@ -17,7 +17,6 @@
17#include <linux/acpi.h> 17#include <linux/acpi.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/smp.h> 19#include <linux/smp.h>
20#include <linux/acpi.h>
21 20
22#include <asm/mtrr.h> 21#include <asm/mtrr.h>
23#include <asm/mpspec.h> 22#include <asm/mpspec.h>
@@ -49,12 +48,12 @@ static int __init mpf_checksum(unsigned char *mp, int len)
49 return sum & 0xFF; 48 return sum & 0xFF;
50} 49}
51 50
52static void __init MP_processor_info(struct mpc_config_processor *m) 51static void __init MP_processor_info(struct mpc_cpu *m)
53{ 52{
54 int apicid; 53 int apicid;
55 char *bootup_cpu = ""; 54 char *bootup_cpu = "";
56 55
57 if (!(m->mpc_cpuflag & CPU_ENABLED)) { 56 if (!(m->cpuflag & CPU_ENABLED)) {
58 disabled_cpus++; 57 disabled_cpus++;
59 return; 58 return;
60 } 59 }
@@ -62,54 +61,54 @@ static void __init MP_processor_info(struct mpc_config_processor *m)
62 if (x86_quirks->mpc_apic_id) 61 if (x86_quirks->mpc_apic_id)
63 apicid = x86_quirks->mpc_apic_id(m); 62 apicid = x86_quirks->mpc_apic_id(m);
64 else 63 else
65 apicid = m->mpc_apicid; 64 apicid = m->apicid;
66 65
67 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { 66 if (m->cpuflag & CPU_BOOTPROCESSOR) {
68 bootup_cpu = " (Bootup-CPU)"; 67 bootup_cpu = " (Bootup-CPU)";
69 boot_cpu_physical_apicid = m->mpc_apicid; 68 boot_cpu_physical_apicid = m->apicid;
70 } 69 }
71 70
72 printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); 71 printk(KERN_INFO "Processor #%d%s\n", m->apicid, bootup_cpu);
73 generic_processor_info(apicid, m->mpc_apicver); 72 generic_processor_info(apicid, m->apicver);
74} 73}
75 74
76#ifdef CONFIG_X86_IO_APIC 75#ifdef CONFIG_X86_IO_APIC
77static void __init MP_bus_info(struct mpc_config_bus *m) 76static void __init MP_bus_info(struct mpc_bus *m)
78{ 77{
79 char str[7]; 78 char str[7];
80 memcpy(str, m->mpc_bustype, 6); 79 memcpy(str, m->bustype, 6);
81 str[6] = 0; 80 str[6] = 0;
82 81
83 if (x86_quirks->mpc_oem_bus_info) 82 if (x86_quirks->mpc_oem_bus_info)
84 x86_quirks->mpc_oem_bus_info(m, str); 83 x86_quirks->mpc_oem_bus_info(m, str);
85 else 84 else
86 apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->mpc_busid, str); 85 apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
87 86
88#if MAX_MP_BUSSES < 256 87#if MAX_MP_BUSSES < 256
89 if (m->mpc_busid >= MAX_MP_BUSSES) { 88 if (m->busid >= MAX_MP_BUSSES) {
90 printk(KERN_WARNING "MP table busid value (%d) for bustype %s " 89 printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
91 " is too large, max. supported is %d\n", 90 " is too large, max. supported is %d\n",
92 m->mpc_busid, str, MAX_MP_BUSSES - 1); 91 m->busid, str, MAX_MP_BUSSES - 1);
93 return; 92 return;
94 } 93 }
95#endif 94#endif
96 95
97 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { 96 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
98 set_bit(m->mpc_busid, mp_bus_not_pci); 97 set_bit(m->busid, mp_bus_not_pci);
99#if defined(CONFIG_EISA) || defined(CONFIG_MCA) 98#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
100 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; 99 mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
101#endif 100#endif
102 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { 101 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
103 if (x86_quirks->mpc_oem_pci_bus) 102 if (x86_quirks->mpc_oem_pci_bus)
104 x86_quirks->mpc_oem_pci_bus(m); 103 x86_quirks->mpc_oem_pci_bus(m);
105 104
106 clear_bit(m->mpc_busid, mp_bus_not_pci); 105 clear_bit(m->busid, mp_bus_not_pci);
107#if defined(CONFIG_EISA) || defined(CONFIG_MCA) 106#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
108 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; 107 mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
109 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { 108 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
110 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; 109 mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
111 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) { 110 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) {
112 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; 111 mp_bus_id_to_type[m->busid] = MP_BUS_MCA;
113#endif 112#endif
114 } else 113 } else
115 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); 114 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
@@ -133,32 +132,31 @@ static int bad_ioapic(unsigned long address)
133 return 0; 132 return 0;
134} 133}
135 134
136static void __init MP_ioapic_info(struct mpc_config_ioapic *m) 135static void __init MP_ioapic_info(struct mpc_ioapic *m)
137{ 136{
138 if (!(m->mpc_flags & MPC_APIC_USABLE)) 137 if (!(m->flags & MPC_APIC_USABLE))
139 return; 138 return;
140 139
141 printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", 140 printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
142 m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); 141 m->apicid, m->apicver, m->apicaddr);
143 142
144 if (bad_ioapic(m->mpc_apicaddr)) 143 if (bad_ioapic(m->apicaddr))
145 return; 144 return;
146 145
147 mp_ioapics[nr_ioapics].mp_apicaddr = m->mpc_apicaddr; 146 mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr;
148 mp_ioapics[nr_ioapics].mp_apicid = m->mpc_apicid; 147 mp_ioapics[nr_ioapics].mp_apicid = m->apicid;
149 mp_ioapics[nr_ioapics].mp_type = m->mpc_type; 148 mp_ioapics[nr_ioapics].mp_type = m->type;
150 mp_ioapics[nr_ioapics].mp_apicver = m->mpc_apicver; 149 mp_ioapics[nr_ioapics].mp_apicver = m->apicver;
151 mp_ioapics[nr_ioapics].mp_flags = m->mpc_flags; 150 mp_ioapics[nr_ioapics].mp_flags = m->flags;
152 nr_ioapics++; 151 nr_ioapics++;
153} 152}
154 153
155static void print_MP_intsrc_info(struct mpc_config_intsrc *m) 154static void print_MP_intsrc_info(struct mpc_intsrc *m)
156{ 155{
157 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," 156 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
158 " IRQ %02x, APIC ID %x, APIC INT %02x\n", 157 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
159 m->mpc_irqtype, m->mpc_irqflag & 3, 158 m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
160 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, 159 m->srcbusirq, m->dstapic, m->dstirq);
161 m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
162} 160}
163 161
164static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) 162static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
@@ -170,52 +168,52 @@ static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
170 mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq); 168 mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq);
171} 169}
172 170
173static void __init assign_to_mp_irq(struct mpc_config_intsrc *m, 171static void __init assign_to_mp_irq(struct mpc_intsrc *m,
174 struct mp_config_intsrc *mp_irq) 172 struct mp_config_intsrc *mp_irq)
175{ 173{
176 mp_irq->mp_dstapic = m->mpc_dstapic; 174 mp_irq->mp_dstapic = m->dstapic;
177 mp_irq->mp_type = m->mpc_type; 175 mp_irq->mp_type = m->type;
178 mp_irq->mp_irqtype = m->mpc_irqtype; 176 mp_irq->mp_irqtype = m->irqtype;
179 mp_irq->mp_irqflag = m->mpc_irqflag; 177 mp_irq->mp_irqflag = m->irqflag;
180 mp_irq->mp_srcbus = m->mpc_srcbus; 178 mp_irq->mp_srcbus = m->srcbus;
181 mp_irq->mp_srcbusirq = m->mpc_srcbusirq; 179 mp_irq->mp_srcbusirq = m->srcbusirq;
182 mp_irq->mp_dstirq = m->mpc_dstirq; 180 mp_irq->mp_dstirq = m->dstirq;
183} 181}
184 182
185static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq, 183static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq,
186 struct mpc_config_intsrc *m) 184 struct mpc_intsrc *m)
187{ 185{
188 m->mpc_dstapic = mp_irq->mp_dstapic; 186 m->dstapic = mp_irq->mp_dstapic;
189 m->mpc_type = mp_irq->mp_type; 187 m->type = mp_irq->mp_type;
190 m->mpc_irqtype = mp_irq->mp_irqtype; 188 m->irqtype = mp_irq->mp_irqtype;
191 m->mpc_irqflag = mp_irq->mp_irqflag; 189 m->irqflag = mp_irq->mp_irqflag;
192 m->mpc_srcbus = mp_irq->mp_srcbus; 190 m->srcbus = mp_irq->mp_srcbus;
193 m->mpc_srcbusirq = mp_irq->mp_srcbusirq; 191 m->srcbusirq = mp_irq->mp_srcbusirq;
194 m->mpc_dstirq = mp_irq->mp_dstirq; 192 m->dstirq = mp_irq->mp_dstirq;
195} 193}
196 194
197static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq, 195static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq,
198 struct mpc_config_intsrc *m) 196 struct mpc_intsrc *m)
199{ 197{
200 if (mp_irq->mp_dstapic != m->mpc_dstapic) 198 if (mp_irq->mp_dstapic != m->dstapic)
201 return 1; 199 return 1;
202 if (mp_irq->mp_type != m->mpc_type) 200 if (mp_irq->mp_type != m->type)
203 return 2; 201 return 2;
204 if (mp_irq->mp_irqtype != m->mpc_irqtype) 202 if (mp_irq->mp_irqtype != m->irqtype)
205 return 3; 203 return 3;
206 if (mp_irq->mp_irqflag != m->mpc_irqflag) 204 if (mp_irq->mp_irqflag != m->irqflag)
207 return 4; 205 return 4;
208 if (mp_irq->mp_srcbus != m->mpc_srcbus) 206 if (mp_irq->mp_srcbus != m->srcbus)
209 return 5; 207 return 5;
210 if (mp_irq->mp_srcbusirq != m->mpc_srcbusirq) 208 if (mp_irq->mp_srcbusirq != m->srcbusirq)
211 return 6; 209 return 6;
212 if (mp_irq->mp_dstirq != m->mpc_dstirq) 210 if (mp_irq->mp_dstirq != m->dstirq)
213 return 7; 211 return 7;
214 212
215 return 0; 213 return 0;
216} 214}
217 215
218static void __init MP_intsrc_info(struct mpc_config_intsrc *m) 216static void __init MP_intsrc_info(struct mpc_intsrc *m)
219{ 217{
220 int i; 218 int i;
221 219
@@ -233,57 +231,55 @@ static void __init MP_intsrc_info(struct mpc_config_intsrc *m)
233 231
234#endif 232#endif
235 233
236static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) 234static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
237{ 235{
238 apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x," 236 apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x,"
239 " IRQ %02x, APIC ID %x, APIC LINT %02x\n", 237 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
240 m->mpc_irqtype, m->mpc_irqflag & 3, 238 m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
241 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, 239 m->srcbusirq, m->destapic, m->destapiclint);
242 m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
243} 240}
244 241
245/* 242/*
246 * Read/parse the MPC 243 * Read/parse the MPC
247 */ 244 */
248 245
249static int __init smp_check_mpc(struct mp_config_table *mpc, char *oem, 246static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
250 char *str)
251{ 247{
252 248
253 if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { 249 if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
254 printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n", 250 printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n",
255 mpc->mpc_signature[0], mpc->mpc_signature[1], 251 mpc->signature[0], mpc->signature[1],
256 mpc->mpc_signature[2], mpc->mpc_signature[3]); 252 mpc->signature[2], mpc->signature[3]);
257 return 0; 253 return 0;
258 } 254 }
259 if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) { 255 if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
260 printk(KERN_ERR "MPTABLE: checksum error!\n"); 256 printk(KERN_ERR "MPTABLE: checksum error!\n");
261 return 0; 257 return 0;
262 } 258 }
263 if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) { 259 if (mpc->spec != 0x01 && mpc->spec != 0x04) {
264 printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n", 260 printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
265 mpc->mpc_spec); 261 mpc->spec);
266 return 0; 262 return 0;
267 } 263 }
268 if (!mpc->mpc_lapic) { 264 if (!mpc->lapic) {
269 printk(KERN_ERR "MPTABLE: null local APIC address!\n"); 265 printk(KERN_ERR "MPTABLE: null local APIC address!\n");
270 return 0; 266 return 0;
271 } 267 }
272 memcpy(oem, mpc->mpc_oem, 8); 268 memcpy(oem, mpc->oem, 8);
273 oem[8] = 0; 269 oem[8] = 0;
274 printk(KERN_INFO "MPTABLE: OEM ID: %s\n", oem); 270 printk(KERN_INFO "MPTABLE: OEM ID: %s\n", oem);
275 271
276 memcpy(str, mpc->mpc_productid, 12); 272 memcpy(str, mpc->productid, 12);
277 str[12] = 0; 273 str[12] = 0;
278 274
279 printk(KERN_INFO "MPTABLE: Product ID: %s\n", str); 275 printk(KERN_INFO "MPTABLE: Product ID: %s\n", str);
280 276
281 printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); 277 printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->lapic);
282 278
283 return 1; 279 return 1;
284} 280}
285 281
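
[Annotation] smp_check_mpc() above validates in a fixed order: signature, checksum, spec version, then the local APIC address. Since mpf_checksum() returns the byte sum modulo 256, a valid table sums to zero. A condensed sketch of the same checks, using the struct names this patch introduces:

static int example_mpc_valid(struct mpc_table *mpc)
{
	if (memcmp(mpc->signature, MPC_SIGNATURE, 4))
		return 0;	/* not an MP config table at all */
	if (mpf_checksum((unsigned char *)mpc, mpc->length))
		return 0;	/* all bytes must sum to 0 mod 256 */
	if (mpc->spec != 0x01 && mpc->spec != 0x04)
		return 0;	/* only MP spec 1.1 and 1.4 are handled */
	return mpc->lapic != 0;	/* a null local APIC address is fatal too */
}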
286static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) 282static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
287{ 283{
288 char str[16]; 284 char str[16];
289 char oem[10]; 285 char oem[10];
@@ -308,14 +304,14 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
308#endif 304#endif
309 /* save the local APIC address, it might be non-default */ 305 /* save the local APIC address, it might be non-default */
310 if (!acpi_lapic) 306 if (!acpi_lapic)
311 mp_lapic_addr = mpc->mpc_lapic; 307 mp_lapic_addr = mpc->lapic;
312 308
313 if (early) 309 if (early)
314 return 1; 310 return 1;
315 311
316 if (mpc->mpc_oemptr && x86_quirks->smp_read_mpc_oem) { 312 if (mpc->oemptr && x86_quirks->smp_read_mpc_oem) {
317 struct mp_config_oemtable *oem_table = (struct mp_config_oemtable *)(unsigned long)mpc->mpc_oemptr; 313 struct mpc_oemtable *oem_table = (void *)(long)mpc->oemptr;
318 x86_quirks->smp_read_mpc_oem(oem_table, mpc->mpc_oemsize); 314 x86_quirks->smp_read_mpc_oem(oem_table, mpc->oemsize);
319 } 315 }
320 316
321 /* 317 /*
@@ -324,12 +320,11 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
324 if (x86_quirks->mpc_record) 320 if (x86_quirks->mpc_record)
325 *x86_quirks->mpc_record = 0; 321 *x86_quirks->mpc_record = 0;
326 322
327 while (count < mpc->mpc_length) { 323 while (count < mpc->length) {
328 switch (*mpt) { 324 switch (*mpt) {
329 case MP_PROCESSOR: 325 case MP_PROCESSOR:
330 { 326 {
331 struct mpc_config_processor *m = 327 struct mpc_cpu *m = (struct mpc_cpu *)mpt;
332 (struct mpc_config_processor *)mpt;
333 /* ACPI may have already provided this data */ 328 /* ACPI may have already provided this data */
334 if (!acpi_lapic) 329 if (!acpi_lapic)
335 MP_processor_info(m); 330 MP_processor_info(m);
@@ -339,8 +334,7 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
339 } 334 }
340 case MP_BUS: 335 case MP_BUS:
341 { 336 {
342 struct mpc_config_bus *m = 337 struct mpc_bus *m = (struct mpc_bus *)mpt;
343 (struct mpc_config_bus *)mpt;
344#ifdef CONFIG_X86_IO_APIC 338#ifdef CONFIG_X86_IO_APIC
345 MP_bus_info(m); 339 MP_bus_info(m);
346#endif 340#endif
@@ -351,30 +345,28 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
351 case MP_IOAPIC: 345 case MP_IOAPIC:
352 { 346 {
353#ifdef CONFIG_X86_IO_APIC 347#ifdef CONFIG_X86_IO_APIC
354 struct mpc_config_ioapic *m = 348 struct mpc_ioapic *m = (struct mpc_ioapic *)mpt;
355 (struct mpc_config_ioapic *)mpt;
356 MP_ioapic_info(m); 349 MP_ioapic_info(m);
357#endif 350#endif
358 mpt += sizeof(struct mpc_config_ioapic); 351 mpt += sizeof(struct mpc_ioapic);
359 count += sizeof(struct mpc_config_ioapic); 352 count += sizeof(struct mpc_ioapic);
360 break; 353 break;
361 } 354 }
362 case MP_INTSRC: 355 case MP_INTSRC:
363 { 356 {
364#ifdef CONFIG_X86_IO_APIC 357#ifdef CONFIG_X86_IO_APIC
365 struct mpc_config_intsrc *m = 358 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
366 (struct mpc_config_intsrc *)mpt;
367 359
368 MP_intsrc_info(m); 360 MP_intsrc_info(m);
369#endif 361#endif
370 mpt += sizeof(struct mpc_config_intsrc); 362 mpt += sizeof(struct mpc_intsrc);
371 count += sizeof(struct mpc_config_intsrc); 363 count += sizeof(struct mpc_intsrc);
372 break; 364 break;
373 } 365 }
374 case MP_LINTSRC: 366 case MP_LINTSRC:
375 { 367 {
376 struct mpc_config_lintsrc *m = 368 struct mpc_lintsrc *m =
377 (struct mpc_config_lintsrc *)mpt; 369 (struct mpc_lintsrc *)mpt;
378 MP_lintsrc_info(m); 370 MP_lintsrc_info(m);
379 mpt += sizeof(*m); 371 mpt += sizeof(*m);
380 count += sizeof(*m); 372 count += sizeof(*m);
@@ -385,8 +377,8 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
385 printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"); 377 printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n");
386 printk(KERN_ERR "type %x\n", *mpt); 378 printk(KERN_ERR "type %x\n", *mpt);
387 print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16, 379 print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
388 1, mpc, mpc->mpc_length, 1); 380 1, mpc, mpc->length, 1);
389 count = mpc->mpc_length; 381 count = mpc->length;
390 break; 382 break;
391 } 383 }
392 if (x86_quirks->mpc_record) 384 if (x86_quirks->mpc_record)
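
[Annotation] The parse loop above is a tagged-record walk: the first byte of each entry names its type, a cast gives typed access, and the cursor advances by that type's size until mpc->length bytes are consumed. A condensed sketch of the shape (two record types only, using this patch's struct names):

static void example_walk(struct mpc_table *mpc)
{
	unsigned char *mpt = (unsigned char *)(mpc + 1);  /* first record */
	int count = sizeof(*mpc);

	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			mpt += sizeof(struct mpc_cpu);
			count += sizeof(struct mpc_cpu);
			break;
		case MP_BUS:
			mpt += sizeof(struct mpc_bus);
			count += sizeof(struct mpc_bus);
			break;
		default:
			return;	/* unknown type: stop, can't skip safely */
		}
	}
}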
@@ -417,16 +409,16 @@ static int __init ELCR_trigger(unsigned int irq)
417 409
418static void __init construct_default_ioirq_mptable(int mpc_default_type) 410static void __init construct_default_ioirq_mptable(int mpc_default_type)
419{ 411{
420 struct mpc_config_intsrc intsrc; 412 struct mpc_intsrc intsrc;
421 int i; 413 int i;
422 int ELCR_fallback = 0; 414 int ELCR_fallback = 0;
423 415
424 intsrc.mpc_type = MP_INTSRC; 416 intsrc.type = MP_INTSRC;
425 intsrc.mpc_irqflag = 0; /* conforming */ 417 intsrc.irqflag = 0; /* conforming */
426 intsrc.mpc_srcbus = 0; 418 intsrc.srcbus = 0;
427 intsrc.mpc_dstapic = mp_ioapics[0].mp_apicid; 419 intsrc.dstapic = mp_ioapics[0].mp_apicid;
428 420
429 intsrc.mpc_irqtype = mp_INT; 421 intsrc.irqtype = mp_INT;
430 422
431 /* 423 /*
432 * If true, we have an ISA/PCI system with no IRQ entries 424 * If true, we have an ISA/PCI system with no IRQ entries
@@ -469,30 +461,30 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
469 * irqflag field (level sensitive, active high polarity). 461 * irqflag field (level sensitive, active high polarity).
470 */ 462 */
471 if (ELCR_trigger(i)) 463 if (ELCR_trigger(i))
472 intsrc.mpc_irqflag = 13; 464 intsrc.irqflag = 13;
473 else 465 else
474 intsrc.mpc_irqflag = 0; 466 intsrc.irqflag = 0;
475 } 467 }
476 468
477 intsrc.mpc_srcbusirq = i; 469 intsrc.srcbusirq = i;
478 intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ 470 intsrc.dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
479 MP_intsrc_info(&intsrc); 471 MP_intsrc_info(&intsrc);
480 } 472 }
481 473
482 intsrc.mpc_irqtype = mp_ExtINT; 474 intsrc.irqtype = mp_ExtINT;
483 intsrc.mpc_srcbusirq = 0; 475 intsrc.srcbusirq = 0;
484 intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ 476 intsrc.dstirq = 0; /* 8259A to INTIN0 */
485 MP_intsrc_info(&intsrc); 477 MP_intsrc_info(&intsrc);
486} 478}
487 479
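
[Annotation] The ELCR branch above stores 13 in irqflag for "level sensitive, active high": the field packs polarity in bits 0-1 and trigger mode in bits 2-3, so 13 (binary 1101) decodes to trigger 3 (level) and polarity 1 (active high), matching the shifts print_MP_intsrc_info() uses. Sketch of the decode:

static void example_decode_irqflag(unsigned short irqflag)
{
	unsigned int polarity = irqflag & 3;	   /* 0=conforming, 1=active high, 3=active low */
	unsigned int trigger = (irqflag >> 2) & 3; /* 0=conforming, 1=edge, 3=level */

	printk(KERN_DEBUG "pol %u, trig %u\n", polarity, trigger);
}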
488 480
489static void __init construct_ioapic_table(int mpc_default_type) 481static void __init construct_ioapic_table(int mpc_default_type)
490{ 482{
491 struct mpc_config_ioapic ioapic; 483 struct mpc_ioapic ioapic;
492 struct mpc_config_bus bus; 484 struct mpc_bus bus;
493 485
494 bus.mpc_type = MP_BUS; 486 bus.type = MP_BUS;
495 bus.mpc_busid = 0; 487 bus.busid = 0;
496 switch (mpc_default_type) { 488 switch (mpc_default_type) {
497 default: 489 default:
498 printk(KERN_ERR "???\nUnknown standard configuration %d\n", 490 printk(KERN_ERR "???\nUnknown standard configuration %d\n",
@@ -500,29 +492,29 @@ static void __init construct_ioapic_table(int mpc_default_type)
500 /* fall through */ 492 /* fall through */
501 case 1: 493 case 1:
502 case 5: 494 case 5:
503 memcpy(bus.mpc_bustype, "ISA ", 6); 495 memcpy(bus.bustype, "ISA ", 6);
504 break; 496 break;
505 case 2: 497 case 2:
506 case 6: 498 case 6:
507 case 3: 499 case 3:
508 memcpy(bus.mpc_bustype, "EISA ", 6); 500 memcpy(bus.bustype, "EISA ", 6);
509 break; 501 break;
510 case 4: 502 case 4:
511 case 7: 503 case 7:
512 memcpy(bus.mpc_bustype, "MCA ", 6); 504 memcpy(bus.bustype, "MCA ", 6);
513 } 505 }
514 MP_bus_info(&bus); 506 MP_bus_info(&bus);
515 if (mpc_default_type > 4) { 507 if (mpc_default_type > 4) {
516 bus.mpc_busid = 1; 508 bus.busid = 1;
517 memcpy(bus.mpc_bustype, "PCI ", 6); 509 memcpy(bus.bustype, "PCI ", 6);
518 MP_bus_info(&bus); 510 MP_bus_info(&bus);
519 } 511 }
520 512
521 ioapic.mpc_type = MP_IOAPIC; 513 ioapic.type = MP_IOAPIC;
522 ioapic.mpc_apicid = 2; 514 ioapic.apicid = 2;
523 ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; 515 ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
524 ioapic.mpc_flags = MPC_APIC_USABLE; 516 ioapic.flags = MPC_APIC_USABLE;
525 ioapic.mpc_apicaddr = 0xFEC00000; 517 ioapic.apicaddr = 0xFEC00000;
526 MP_ioapic_info(&ioapic); 518 MP_ioapic_info(&ioapic);
527 519
528 /* 520 /*
@@ -536,8 +528,8 @@ static inline void __init construct_ioapic_table(int mpc_default_type) { }
536 528
537static inline void __init construct_default_ISA_mptable(int mpc_default_type) 529static inline void __init construct_default_ISA_mptable(int mpc_default_type)
538{ 530{
539 struct mpc_config_processor processor; 531 struct mpc_cpu processor;
540 struct mpc_config_lintsrc lintsrc; 532 struct mpc_lintsrc lintsrc;
541 int linttypes[2] = { mp_ExtINT, mp_NMI }; 533 int linttypes[2] = { mp_ExtINT, mp_NMI };
542 int i; 534 int i;
543 535
@@ -549,30 +541,30 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
549 /* 541 /*
550 * 2 CPUs, numbered 0 & 1. 542 * 2 CPUs, numbered 0 & 1.
551 */ 543 */
552 processor.mpc_type = MP_PROCESSOR; 544 processor.type = MP_PROCESSOR;
553 /* Either an integrated APIC or a discrete 82489DX. */ 545 /* Either an integrated APIC or a discrete 82489DX. */
554 processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; 546 processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
555 processor.mpc_cpuflag = CPU_ENABLED; 547 processor.cpuflag = CPU_ENABLED;
556 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 548 processor.cpufeature = (boot_cpu_data.x86 << 8) |
557 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; 549 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
558 processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; 550 processor.featureflag = boot_cpu_data.x86_capability[0];
559 processor.mpc_reserved[0] = 0; 551 processor.reserved[0] = 0;
560 processor.mpc_reserved[1] = 0; 552 processor.reserved[1] = 0;
561 for (i = 0; i < 2; i++) { 553 for (i = 0; i < 2; i++) {
562 processor.mpc_apicid = i; 554 processor.apicid = i;
563 MP_processor_info(&processor); 555 MP_processor_info(&processor);
564 } 556 }
565 557
566 construct_ioapic_table(mpc_default_type); 558 construct_ioapic_table(mpc_default_type);
567 559
568 lintsrc.mpc_type = MP_LINTSRC; 560 lintsrc.type = MP_LINTSRC;
569 lintsrc.mpc_irqflag = 0; /* conforming */ 561 lintsrc.irqflag = 0; /* conforming */
570 lintsrc.mpc_srcbusid = 0; 562 lintsrc.srcbusid = 0;
571 lintsrc.mpc_srcbusirq = 0; 563 lintsrc.srcbusirq = 0;
572 lintsrc.mpc_destapic = MP_APIC_ALL; 564 lintsrc.destapic = MP_APIC_ALL;
573 for (i = 0; i < 2; i++) { 565 for (i = 0; i < 2; i++) {
574 lintsrc.mpc_irqtype = linttypes[i]; 566 lintsrc.irqtype = linttypes[i];
575 lintsrc.mpc_destapiclint = i; 567 lintsrc.destapiclint = i;
576 MP_lintsrc_info(&lintsrc); 568 MP_lintsrc_info(&lintsrc);
577 } 569 }
578} 570}
@@ -657,15 +649,15 @@ static void __init __get_smp_config(unsigned int early)
657 * ISA defaults and hope it will work. 649 * ISA defaults and hope it will work.
658 */ 650 */
659 if (!mp_irq_entries) { 651 if (!mp_irq_entries) {
660 struct mpc_config_bus bus; 652 struct mpc_bus bus;
661 653
662 printk(KERN_ERR "BIOS bug, no explicit IRQ entries, " 654 printk(KERN_ERR "BIOS bug, no explicit IRQ entries, "
663 "using default mptable. " 655 "using default mptable. "
664 "(tell your hw vendor)\n"); 656 "(tell your hw vendor)\n");
665 657
666 bus.mpc_type = MP_BUS; 658 bus.type = MP_BUS;
667 bus.mpc_busid = 0; 659 bus.busid = 0;
668 memcpy(bus.mpc_bustype, "ISA ", 6); 660 memcpy(bus.bustype, "ISA ", 6);
669 MP_bus_info(&bus); 661 MP_bus_info(&bus);
670 662
671 construct_default_ioirq_mptable(0); 663 construct_default_ioirq_mptable(0);
@@ -803,14 +795,14 @@ void __init find_smp_config(void)
803#ifdef CONFIG_X86_IO_APIC 795#ifdef CONFIG_X86_IO_APIC
804static u8 __initdata irq_used[MAX_IRQ_SOURCES]; 796static u8 __initdata irq_used[MAX_IRQ_SOURCES];
805 797
806static int __init get_MP_intsrc_index(struct mpc_config_intsrc *m) 798static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
807{ 799{
808 int i; 800 int i;
809 801
810 if (m->mpc_irqtype != mp_INT) 802 if (m->irqtype != mp_INT)
811 return 0; 803 return 0;
812 804
813 if (m->mpc_irqflag != 0x0f) 805 if (m->irqflag != 0x0f)
814 return 0; 806 return 0;
815 807
816 /* not legacy */ 808 /* not legacy */
@@ -822,9 +814,9 @@ static int __init get_MP_intsrc_index(struct mpc_config_intsrc *m)
822 if (mp_irqs[i].mp_irqflag != 0x0f) 814 if (mp_irqs[i].mp_irqflag != 0x0f)
823 continue; 815 continue;
824 816
825 if (mp_irqs[i].mp_srcbus != m->mpc_srcbus) 817 if (mp_irqs[i].mp_srcbus != m->srcbus)
826 continue; 818 continue;
827 if (mp_irqs[i].mp_srcbusirq != m->mpc_srcbusirq) 819 if (mp_irqs[i].mp_srcbusirq != m->srcbusirq)
828 continue; 820 continue;
829 if (irq_used[i]) { 821 if (irq_used[i]) {
830 /* already claimed */ 822 /* already claimed */
@@ -840,10 +832,10 @@ static int __init get_MP_intsrc_index(struct mpc_config_intsrc *m)
840 832
841#define SPARE_SLOT_NUM 20 833#define SPARE_SLOT_NUM 20
842 834
843static struct mpc_config_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; 835static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
844#endif 836#endif
845 837
846static int __init replace_intsrc_all(struct mp_config_table *mpc, 838static int __init replace_intsrc_all(struct mpc_table *mpc,
847 unsigned long mpc_new_phys, 839 unsigned long mpc_new_phys,
848 unsigned long mpc_new_length) 840 unsigned long mpc_new_length)
849{ 841{
@@ -855,36 +847,33 @@ static int __init replace_intsrc_all(struct mp_config_table *mpc,
855 int count = sizeof(*mpc); 847 int count = sizeof(*mpc);
856 unsigned char *mpt = ((unsigned char *)mpc) + count; 848 unsigned char *mpt = ((unsigned char *)mpc) + count;
857 849
858 printk(KERN_INFO "mpc_length %x\n", mpc->mpc_length); 850 printk(KERN_INFO "mpc_length %x\n", mpc->length);
859 while (count < mpc->mpc_length) { 851 while (count < mpc->length) {
860 switch (*mpt) { 852 switch (*mpt) {
861 case MP_PROCESSOR: 853 case MP_PROCESSOR:
862 { 854 {
863 struct mpc_config_processor *m = 855 struct mpc_cpu *m = (struct mpc_cpu *)mpt;
864 (struct mpc_config_processor *)mpt;
865 mpt += sizeof(*m); 856 mpt += sizeof(*m);
866 count += sizeof(*m); 857 count += sizeof(*m);
867 break; 858 break;
868 } 859 }
869 case MP_BUS: 860 case MP_BUS:
870 { 861 {
871 struct mpc_config_bus *m = 862 struct mpc_bus *m = (struct mpc_bus *)mpt;
872 (struct mpc_config_bus *)mpt;
873 mpt += sizeof(*m); 863 mpt += sizeof(*m);
874 count += sizeof(*m); 864 count += sizeof(*m);
875 break; 865 break;
876 } 866 }
877 case MP_IOAPIC: 867 case MP_IOAPIC:
878 { 868 {
879 mpt += sizeof(struct mpc_config_ioapic); 869 mpt += sizeof(struct mpc_ioapic);
880 count += sizeof(struct mpc_config_ioapic); 870 count += sizeof(struct mpc_ioapic);
881 break; 871 break;
882 } 872 }
883 case MP_INTSRC: 873 case MP_INTSRC:
884 { 874 {
885#ifdef CONFIG_X86_IO_APIC 875#ifdef CONFIG_X86_IO_APIC
886 struct mpc_config_intsrc *m = 876 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
887 (struct mpc_config_intsrc *)mpt;
888 877
889 printk(KERN_INFO "OLD "); 878 printk(KERN_INFO "OLD ");
890 print_MP_intsrc_info(m); 879 print_MP_intsrc_info(m);
@@ -905,14 +894,14 @@ static int __init replace_intsrc_all(struct mp_config_table *mpc,
905 nr_m_spare++; 894 nr_m_spare++;
906 } 895 }
907#endif 896#endif
908 mpt += sizeof(struct mpc_config_intsrc); 897 mpt += sizeof(struct mpc_intsrc);
909 count += sizeof(struct mpc_config_intsrc); 898 count += sizeof(struct mpc_intsrc);
910 break; 899 break;
911 } 900 }
912 case MP_LINTSRC: 901 case MP_LINTSRC:
913 { 902 {
914 struct mpc_config_lintsrc *m = 903 struct mpc_lintsrc *m =
915 (struct mpc_config_lintsrc *)mpt; 904 (struct mpc_lintsrc *)mpt;
916 mpt += sizeof(*m); 905 mpt += sizeof(*m);
917 count += sizeof(*m); 906 count += sizeof(*m);
918 break; 907 break;
@@ -922,7 +911,7 @@ static int __init replace_intsrc_all(struct mp_config_table *mpc,
922 printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"); 911 printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n");
923 printk(KERN_ERR "type %x\n", *mpt); 912 printk(KERN_ERR "type %x\n", *mpt);
924 print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16, 913 print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
925 1, mpc, mpc->mpc_length, 1); 914 1, mpc, mpc->length, 1);
926 goto out; 915 goto out;
927 } 916 }
928 } 917 }
@@ -944,9 +933,8 @@ static int __init replace_intsrc_all(struct mp_config_table *mpc,
944 assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]); 933 assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]);
945 m_spare[nr_m_spare] = NULL; 934 m_spare[nr_m_spare] = NULL;
946 } else { 935 } else {
947 struct mpc_config_intsrc *m = 936 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
948 (struct mpc_config_intsrc *)mpt; 937 count += sizeof(struct mpc_intsrc);
949 count += sizeof(struct mpc_config_intsrc);
950 if (!mpc_new_phys) { 938 if (!mpc_new_phys) {
951 printk(KERN_INFO "No spare slots, try to append...take your risk, new mpc_length %x\n", count); 939 printk(KERN_INFO "No spare slots, try to append...take your risk, new mpc_length %x\n", count);
952 } else { 940 } else {
@@ -958,17 +946,16 @@ static int __init replace_intsrc_all(struct mp_config_table *mpc,
958 } 946 }
959 } 947 }
960 assign_to_mpc_intsrc(&mp_irqs[i], m); 948 assign_to_mpc_intsrc(&mp_irqs[i], m);
961 mpc->mpc_length = count; 949 mpc->length = count;
962 mpt += sizeof(struct mpc_config_intsrc); 950 mpt += sizeof(struct mpc_intsrc);
963 } 951 }
964 print_mp_irq_info(&mp_irqs[i]); 952 print_mp_irq_info(&mp_irqs[i]);
965 } 953 }
966#endif 954#endif
967out: 955out:
968 /* update checksum */ 956 /* update checksum */
969 mpc->mpc_checksum = 0; 957 mpc->checksum = 0;
970 mpc->mpc_checksum -= mpf_checksum((unsigned char *)mpc, 958 mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
971 mpc->mpc_length);
972 959
973 return 0; 960 return 0;
974} 961}
@@ -1014,8 +1001,7 @@ static int __init update_mp_table(void)
1014 char str[16]; 1001 char str[16];
1015 char oem[10]; 1002 char oem[10];
1016 struct intel_mp_floating *mpf; 1003 struct intel_mp_floating *mpf;
1017 struct mp_config_table *mpc; 1004 struct mpc_table *mpc, *mpc_new;
1018 struct mp_config_table *mpc_new;
1019 1005
1020 if (!enable_update_mptable) 1006 if (!enable_update_mptable)
1021 return 0; 1007 return 0;
@@ -1041,7 +1027,7 @@ static int __init update_mp_table(void)
1041 printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf)); 1027 printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
1042 printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr); 1028 printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr);
1043 1029
1044 if (mpc_new_phys && mpc->mpc_length > mpc_new_length) { 1030 if (mpc_new_phys && mpc->length > mpc_new_length) {
1045 mpc_new_phys = 0; 1031 mpc_new_phys = 0;
1046 printk(KERN_INFO "mpc_new_length is %ld, please use alloc_mptable=8k\n", 1032 printk(KERN_INFO "mpc_new_length is %ld, please use alloc_mptable=8k\n",
1047 mpc_new_length); 1033 mpc_new_length);
@@ -1050,10 +1036,10 @@ static int __init update_mp_table(void)
1050 if (!mpc_new_phys) { 1036 if (!mpc_new_phys) {
1051 unsigned char old, new; 1037 unsigned char old, new;
1052 /* check if we can change the position */ 1038 /* check if we can change the position */
1053 mpc->mpc_checksum = 0; 1039 mpc->checksum = 0;
1054 old = mpf_checksum((unsigned char *)mpc, mpc->mpc_length); 1040 old = mpf_checksum((unsigned char *)mpc, mpc->length);
1055 mpc->mpc_checksum = 0xff; 1041 mpc->checksum = 0xff;
1056 new = mpf_checksum((unsigned char *)mpc, mpc->mpc_length); 1042 new = mpf_checksum((unsigned char *)mpc, mpc->length);
1057 if (old == new) { 1043 if (old == new) {
1058 printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n"); 1044 printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n");
1059 return 0; 1045 return 0;
@@ -1062,7 +1048,7 @@ static int __init update_mp_table(void)
1062 } else { 1048 } else {
1063 mpf->mpf_physptr = mpc_new_phys; 1049 mpf->mpf_physptr = mpc_new_phys;
1064 mpc_new = phys_to_virt(mpc_new_phys); 1050 mpc_new = phys_to_virt(mpc_new_phys);
1065 memcpy(mpc_new, mpc, mpc->mpc_length); 1051 memcpy(mpc_new, mpc, mpc->length);
1066 mpc = mpc_new; 1052 mpc = mpc_new;
1067 /* check if we can modify that */ 1053 /* check if we can modify that */
1068 if (mpc_new_phys - mpf->mpf_physptr) { 1054 if (mpc_new_phys - mpf->mpf_physptr) {
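
[Annotation] Two checksum tricks sit in the hunks above. replace_intsrc_all() re-seals an edited table by zeroing the checksum byte and storing the negated byte sum, so the whole table sums to 0 mod 256 again; update_mp_table() probes writability by flipping that byte and re-summing, and matching sums mean the table lives in ROM. A sketch of the reseal:

static void example_reseal(struct mpc_table *mpc)
{
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
	/* afterwards mpf_checksum(mpc, mpc->length) == 0 again */
}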
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 45a09ccdc21..7228979f1e7 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -26,7 +26,6 @@
26#include <linux/kernel_stat.h> 26#include <linux/kernel_stat.h>
27#include <linux/kdebug.h> 27#include <linux/kdebug.h>
28#include <linux/smp.h> 28#include <linux/smp.h>
29#include <linux/nmi.h>
30 29
31#include <asm/i8259.h> 30#include <asm/i8259.h>
32#include <asm/io_apic.h> 31#include <asm/io_apic.h>
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
index 0deea37a53c..f2191d4f271 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/numaq_32.c
@@ -117,16 +117,15 @@ static inline int generate_logical_apicid(int quad, int phys_apicid)
117} 117}
118 118
119/* x86_quirks member */ 119/* x86_quirks member */
120static int mpc_apic_id(struct mpc_config_processor *m) 120static int mpc_apic_id(struct mpc_cpu *m)
121{ 121{
122 int quad = translation_table[mpc_record]->trans_quad; 122 int quad = translation_table[mpc_record]->trans_quad;
123 int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid); 123 int logical_apicid = generate_logical_apicid(quad, m->apicid);
124 124
125 printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n", 125 printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
126 m->mpc_apicid, 126 m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
127 (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, 127 (m->cpufeature & CPU_MODEL_MASK) >> 4,
128 (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, 128 m->apicver, quad, logical_apicid);
129 m->mpc_apicver, quad, logical_apicid);
130 return logical_apicid; 129 return logical_apicid;
131} 130}
132 131
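
[Annotation] mpc_apic_id() above unpacks the cpufeature word that construct_default_ISA_mptable() packs as (family << 8) | (model << 4) | stepping. Assuming CPU_FAMILY_MASK is 0x0f00 and CPU_MODEL_MASK is 0x00f0 (which the shifts imply), the round trip looks like:

static inline unsigned int pack_cpufeature(unsigned int family,
					   unsigned int model,
					   unsigned int stepping)
{
	return (family << 8) | (model << 4) | stepping;
}

static inline unsigned int unpack_family(unsigned int cpufeature)
{
	return (cpufeature & 0x0f00) >> 8;	/* assumed CPU_FAMILY_MASK */
}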
@@ -135,26 +134,26 @@ int mp_bus_id_to_node[MAX_MP_BUSSES];
135int mp_bus_id_to_local[MAX_MP_BUSSES]; 134int mp_bus_id_to_local[MAX_MP_BUSSES];
136 135
137/* x86_quirks member */ 136/* x86_quirks member */
138static void mpc_oem_bus_info(struct mpc_config_bus *m, char *name) 137static void mpc_oem_bus_info(struct mpc_bus *m, char *name)
139{ 138{
140 int quad = translation_table[mpc_record]->trans_quad; 139 int quad = translation_table[mpc_record]->trans_quad;
141 int local = translation_table[mpc_record]->trans_local; 140 int local = translation_table[mpc_record]->trans_local;
142 141
143 mp_bus_id_to_node[m->mpc_busid] = quad; 142 mp_bus_id_to_node[m->busid] = quad;
144 mp_bus_id_to_local[m->mpc_busid] = local; 143 mp_bus_id_to_local[m->busid] = local;
145 printk(KERN_INFO "Bus #%d is %s (node %d)\n", 144 printk(KERN_INFO "Bus #%d is %s (node %d)\n",
146 m->mpc_busid, name, quad); 145 m->busid, name, quad);
147} 146}
148 147
149int quad_local_to_mp_bus_id [NR_CPUS/4][4]; 148int quad_local_to_mp_bus_id [NR_CPUS/4][4];
150 149
151/* x86_quirks member */ 150/* x86_quirks member */
152static void mpc_oem_pci_bus(struct mpc_config_bus *m) 151static void mpc_oem_pci_bus(struct mpc_bus *m)
153{ 152{
154 int quad = translation_table[mpc_record]->trans_quad; 153 int quad = translation_table[mpc_record]->trans_quad;
155 int local = translation_table[mpc_record]->trans_local; 154 int local = translation_table[mpc_record]->trans_local;
156 155
157 quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; 156 quad_local_to_mp_bus_id[quad][local] = m->busid;
158} 157}
159 158
160static void __init MP_translation_info(struct mpc_config_translation *m) 159static void __init MP_translation_info(struct mpc_config_translation *m)
@@ -186,7 +185,7 @@ static int __init mpf_checksum(unsigned char *mp, int len)
186 * Read/parse the MPC oem tables 185 * Read/parse the MPC oem tables
187 */ 186 */
188 187
189static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, 188static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable,
190 unsigned short oemsize) 189 unsigned short oemsize)
191{ 190{
192 int count = sizeof(*oemtable); /* the header size */ 191 int count = sizeof(*oemtable); /* the header size */
@@ -195,18 +194,18 @@ static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
195 mpc_record = 0; 194 mpc_record = 0;
196 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", 195 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n",
197 oemtable); 196 oemtable);
198 if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) { 197 if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) {
199 printk(KERN_WARNING 198 printk(KERN_WARNING
200 "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", 199 "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
201 oemtable->oem_signature[0], oemtable->oem_signature[1], 200 oemtable->signature[0], oemtable->signature[1],
202 oemtable->oem_signature[2], oemtable->oem_signature[3]); 201 oemtable->signature[2], oemtable->signature[3]);
203 return; 202 return;
204 } 203 }
205 if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) { 204 if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) {
206 printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); 205 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
207 return; 206 return;
208 } 207 }
209 while (count < oemtable->oem_length) { 208 while (count < oemtable->length) {
210 switch (*oemptr) { 209 switch (*oemptr) {
211 case MP_TRANSLATION: 210 case MP_TRANSLATION:
212 { 211 {
@@ -260,8 +259,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = {
260 .update_genapic = numaq_update_genapic, 259 .update_genapic = numaq_update_genapic,
261}; 260};
262 261
263void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, 262void numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
264 char *productid)
265{ 263{
266 if (strncmp(oem, "IBM NUMA", 8)) 264 if (strncmp(oem, "IBM NUMA", 8))
267 printk("Warning! Not a NUMA-Q system!\n"); 265 printk("Warning! Not a NUMA-Q system!\n");
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 0d75c129b18..f293a8df682 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(bad_dma_address);
38 be probably a smaller DMA mask, but this is bug-to-bug compatible 38 be probably a smaller DMA mask, but this is bug-to-bug compatible
39 to older i386. */ 39 to older i386. */
40struct device x86_dma_fallback_dev = { 40struct device x86_dma_fallback_dev = {
41 .bus_id = "fallback device", 41 .init_name = "fallback device",
42 .coherent_dma_mask = DMA_32BIT_MASK, 42 .coherent_dma_mask = DMA_32BIT_MASK,
43 .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask, 43 .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
44}; 44};
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 3ba155d2488..a546f55c77b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -39,11 +39,12 @@
39#include <linux/prctl.h> 39#include <linux/prctl.h>
40#include <linux/dmi.h> 40#include <linux/dmi.h>
41#include <linux/ftrace.h> 41#include <linux/ftrace.h>
42#include <linux/uaccess.h>
43#include <linux/io.h>
44#include <linux/kdebug.h>
42 45
43#include <asm/uaccess.h>
44#include <asm/pgtable.h> 46#include <asm/pgtable.h>
45#include <asm/system.h> 47#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/ldt.h> 48#include <asm/ldt.h>
48#include <asm/processor.h> 49#include <asm/processor.h>
49#include <asm/i387.h> 50#include <asm/i387.h>
@@ -56,10 +57,8 @@
56 57
57#include <asm/tlbflush.h> 58#include <asm/tlbflush.h>
58#include <asm/cpu.h> 59#include <asm/cpu.h>
59#include <asm/kdebug.h>
60#include <asm/idle.h> 60#include <asm/idle.h>
61#include <asm/syscalls.h> 61#include <asm/syscalls.h>
62#include <asm/smp.h>
63#include <asm/ds.h> 62#include <asm/ds.h>
64 63
65asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 64asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -205,7 +204,7 @@ extern void kernel_thread_helper(void);
205/* 204/*
206 * Create a kernel thread 205 * Create a kernel thread
207 */ 206 */
208int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 207int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
209{ 208{
210 struct pt_regs regs; 209 struct pt_regs regs;
211 210
@@ -266,7 +265,7 @@ void flush_thread(void)
266 tsk->thread.debugreg3 = 0; 265 tsk->thread.debugreg3 = 0;
267 tsk->thread.debugreg6 = 0; 266 tsk->thread.debugreg6 = 0;
268 tsk->thread.debugreg7 = 0; 267 tsk->thread.debugreg7 = 0;
269 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); 268 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
270 clear_tsk_thread_flag(tsk, TIF_DEBUG); 269 clear_tsk_thread_flag(tsk, TIF_DEBUG);
271 /* 270 /*
272 * Forget coprocessor state... 271 * Forget coprocessor state...
@@ -293,9 +292,9 @@ void prepare_to_copy(struct task_struct *tsk)
293 292
294int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, 293int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
295 unsigned long unused, 294 unsigned long unused,
296 struct task_struct * p, struct pt_regs * regs) 295 struct task_struct *p, struct pt_regs *regs)
297{ 296{
298 struct pt_regs * childregs; 297 struct pt_regs *childregs;
299 struct task_struct *tsk; 298 struct task_struct *tsk;
300 int err; 299 int err;
301 300
@@ -347,7 +346,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
347void 346void
348start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) 347start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
349{ 348{
350 __asm__("movl %0, %%gs" :: "r"(0)); 349 __asm__("movl %0, %%gs" : : "r"(0));
351 regs->fs = 0; 350 regs->fs = 0;
352 set_fs(USER_DS); 351 set_fs(USER_DS);
353 regs->ds = __USER_DS; 352 regs->ds = __USER_DS;
@@ -638,7 +637,7 @@ asmlinkage int sys_vfork(struct pt_regs regs)
638asmlinkage int sys_execve(struct pt_regs regs) 637asmlinkage int sys_execve(struct pt_regs regs)
639{ 638{
640 int error; 639 int error;
641 char * filename; 640 char *filename;
642 641
643 filename = getname((char __user *) regs.bx); 642 filename = getname((char __user *) regs.bx);
644 error = PTR_ERR(filename); 643 error = PTR_ERR(filename);
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index a4b619c3310..55c46074eba 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -5,12 +5,11 @@
5#include <linux/percpu.h> 5#include <linux/percpu.h>
6#include <linux/kexec.h> 6#include <linux/kexec.h>
7#include <linux/crash_dump.h> 7#include <linux/crash_dump.h>
8#include <asm/smp.h> 8#include <linux/smp.h>
9#include <asm/percpu.h> 9#include <linux/topology.h>
10#include <asm/sections.h> 10#include <asm/sections.h>
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <asm/setup.h> 12#include <asm/setup.h>
13#include <asm/topology.h>
14#include <asm/mpspec.h> 13#include <asm/mpspec.h>
15#include <asm/apicdef.h> 14#include <asm/apicdef.h>
16#include <asm/highmem.h> 15#include <asm/highmem.h>
@@ -20,8 +19,8 @@ unsigned int num_processors;
20unsigned disabled_cpus __cpuinitdata; 19unsigned disabled_cpus __cpuinitdata;
21/* Processor that is doing the boot up */ 20/* Processor that is doing the boot up */
22unsigned int boot_cpu_physical_apicid = -1U; 21unsigned int boot_cpu_physical_apicid = -1U;
23unsigned int max_physical_apicid;
24EXPORT_SYMBOL(boot_cpu_physical_apicid); 22EXPORT_SYMBOL(boot_cpu_physical_apicid);
23unsigned int max_physical_apicid;
25 24
26/* Bitmask of physically existing CPUs */ 25/* Bitmask of physically existing CPUs */
27physid_mask_t phys_cpu_present_map; 26physid_mask_t phys_cpu_present_map;
@@ -131,7 +130,27 @@ static void __init setup_cpu_pda_map(void)
131 /* point to new pointer table */ 130 /* point to new pointer table */
132 _cpu_pda = new_cpu_pda; 131 _cpu_pda = new_cpu_pda;
133} 132}
134#endif 133
134#endif /* CONFIG_SMP && CONFIG_X86_64 */
135
136#ifdef CONFIG_X86_64
137
138/* correctly size the local cpu masks */
139static void setup_cpu_local_masks(void)
140{
141 alloc_bootmem_cpumask_var(&cpu_initialized_mask);
142 alloc_bootmem_cpumask_var(&cpu_callin_mask);
143 alloc_bootmem_cpumask_var(&cpu_callout_mask);
144 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
145}
146
147#else /* CONFIG_X86_32 */
148
149static inline void setup_cpu_local_masks(void)
150{
151}
152
153#endif /* CONFIG_X86_32 */
135 154
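
[Annotation] The #ifdef block above follows the usual stub idiom: the 64-bit build gets a real setup_cpu_local_masks() that allocates the masks from bootmem, the 32-bit build gets an empty static inline, and setup_per_cpu_areas() can then call it unconditionally. The generic shape, with a hypothetical feature name:

#ifdef CONFIG_FEATURE_X				/* hypothetical config option */
void feature_x_init(void);			/* real version lives in a .c file */
#else
static inline void feature_x_init(void) { }	/* compiles away to nothing */
#endif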
136/* 155/*
137 * Great future plan: 156 * Great future plan:
@@ -187,6 +206,9 @@ void __init setup_per_cpu_areas(void)
187 206
188 /* Setup node to cpumask map */ 207 /* Setup node to cpumask map */
189 setup_node_to_cpumask_map(); 208 setup_node_to_cpumask_map();
209
210 /* Setup cpu initialized, callin, callout masks */
211 setup_cpu_local_masks();
190} 212}
191 213
192#endif 214#endif
@@ -280,8 +302,8 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
280 302
281 cpulist_scnprintf(buf, sizeof(buf), mask); 303 cpulist_scnprintf(buf, sizeof(buf), mask);
282 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", 304 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
283 enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf); 305 enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
284 } 306}
285 307
286void __cpuinit numa_add_cpu(int cpu) 308void __cpuinit numa_add_cpu(int cpu)
287{ 309{
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index beea2649a24..e6faa3316bd 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Intel SMP support routines. 2 * Intel SMP support routines.
3 * 3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> 4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
5 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> 5 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
6 * (c) 2002,2003 Andi Kleen, SuSE Labs. 6 * (c) 2002,2003 Andi Kleen, SuSE Labs.
7 * 7 *
@@ -128,16 +128,23 @@ void native_send_call_func_single_ipi(int cpu)
128 128
129void native_send_call_func_ipi(const struct cpumask *mask) 129void native_send_call_func_ipi(const struct cpumask *mask)
130{ 130{
131 cpumask_t allbutself; 131 cpumask_var_t allbutself;
132 132
133 allbutself = cpu_online_map; 133 if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
134 cpu_clear(smp_processor_id(), allbutself); 134 send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
135 return;
136 }
135 137
136 if (cpus_equal(*mask, allbutself) && 138 cpumask_copy(allbutself, cpu_online_mask);
137 cpus_equal(cpu_online_map, cpu_callout_map)) 139 cpumask_clear_cpu(smp_processor_id(), allbutself);
140
141 if (cpumask_equal(mask, allbutself) &&
142 cpumask_equal(cpu_online_mask, cpu_callout_mask))
138 send_IPI_allbutself(CALL_FUNCTION_VECTOR); 143 send_IPI_allbutself(CALL_FUNCTION_VECTOR);
139 else 144 else
140 send_IPI_mask(mask, CALL_FUNCTION_VECTOR); 145 send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
146
147 free_cpumask_var(allbutself);
141} 148}
142 149
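
[Annotation] native_send_call_func_ipi() above moves the "all but self" mask off the stack: allocate a cpumask_var_t with GFP_ATOMIC, fall back to the plain mask IPI if allocation fails, and free on every exit path. The general shape of the pattern, as a sketch:

static void example_ipi_all_but_self(void)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
		return;			/* a real caller needs a fallback here */

	cpumask_copy(mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), mask);
	/* ... send the IPI to "mask" ... */
	free_cpumask_var(mask);
}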
143/* 150/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6bd4d9b7387..bb1a3b1fc87 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * x86 SMP booting functions 2 * x86 SMP booting functions
3 * 3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> 4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
5 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> 5 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
6 * Copyright 2001 Andi Kleen, SuSE Labs. 6 * Copyright 2001 Andi Kleen, SuSE Labs.
7 * 7 *
@@ -102,9 +102,6 @@ EXPORT_SYMBOL(smp_num_siblings);
102/* Last level cache ID of each logical CPU */ 102/* Last level cache ID of each logical CPU */
103DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; 103DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
104 104
105cpumask_t cpu_callin_map;
106cpumask_t cpu_callout_map;
107
108/* representing HT siblings of each logical CPU */ 105/* representing HT siblings of each logical CPU */
109DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); 106DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
110EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 107EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
@@ -120,9 +117,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
120static atomic_t init_deasserted; 117static atomic_t init_deasserted;
121 118
122 119
123/* representing cpus for which sibling maps can be computed */
124static cpumask_t cpu_sibling_setup_map;
125
126/* Set if we find a B stepping CPU */ 120/* Set if we find a B stepping CPU */
127static int __cpuinitdata smp_b_stepping; 121static int __cpuinitdata smp_b_stepping;
128 122
@@ -140,7 +134,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
140static void map_cpu_to_node(int cpu, int node) 134static void map_cpu_to_node(int cpu, int node)
141{ 135{
142 printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); 136 printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
143 cpu_set(cpu, node_to_cpumask_map[node]); 137 cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
144 cpu_to_node_map[cpu] = node; 138 cpu_to_node_map[cpu] = node;
145} 139}
146 140
@@ -151,7 +145,7 @@ static void unmap_cpu_to_node(int cpu)
151 145
152 printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); 146 printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
153 for (node = 0; node < MAX_NUMNODES; node++) 147 for (node = 0; node < MAX_NUMNODES; node++)
154 cpu_clear(cpu, node_to_cpumask_map[node]); 148 cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
155 cpu_to_node_map[cpu] = 0; 149 cpu_to_node_map[cpu] = 0;
156} 150}
157#else /* !(CONFIG_NUMA && CONFIG_X86_32) */ 151#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
@@ -209,7 +203,7 @@ static void __cpuinit smp_callin(void)
209 */ 203 */
210 phys_id = read_apic_id(); 204 phys_id = read_apic_id();
211 cpuid = smp_processor_id(); 205 cpuid = smp_processor_id();
212 if (cpu_isset(cpuid, cpu_callin_map)) { 206 if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
213 panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, 207 panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
214 phys_id, cpuid); 208 phys_id, cpuid);
215 } 209 }
@@ -231,7 +225,7 @@ static void __cpuinit smp_callin(void)
231 /* 225 /*
232 * Has the boot CPU finished its STARTUP sequence? 226 * Has the boot CPU finished its STARTUP sequence?
233 */ 227 */
234 if (cpu_isset(cpuid, cpu_callout_map)) 228 if (cpumask_test_cpu(cpuid, cpu_callout_mask))
235 break; 229 break;
236 cpu_relax(); 230 cpu_relax();
237 } 231 }
@@ -274,7 +268,7 @@ static void __cpuinit smp_callin(void)
274 /* 268 /*
275 * Allow the master to continue. 269 * Allow the master to continue.
276 */ 270 */
277 cpu_set(cpuid, cpu_callin_map); 271 cpumask_set_cpu(cpuid, cpu_callin_mask);
278} 272}
279 273
280static int __cpuinitdata unsafe_smp; 274static int __cpuinitdata unsafe_smp;
@@ -332,7 +326,7 @@ notrace static void __cpuinit start_secondary(void *unused)
332 ipi_call_lock(); 326 ipi_call_lock();
333 lock_vector_lock(); 327 lock_vector_lock();
334 __setup_vector_irq(smp_processor_id()); 328 __setup_vector_irq(smp_processor_id());
335 cpu_set(smp_processor_id(), cpu_online_map); 329 set_cpu_online(smp_processor_id(), true);
336 unlock_vector_lock(); 330 unlock_vector_lock();
337 ipi_call_unlock(); 331 ipi_call_unlock();
338 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 332 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -438,50 +432,52 @@ void __cpuinit set_cpu_sibling_map(int cpu)
438 int i; 432 int i;
439 struct cpuinfo_x86 *c = &cpu_data(cpu); 433 struct cpuinfo_x86 *c = &cpu_data(cpu);
440 434
441 cpu_set(cpu, cpu_sibling_setup_map); 435 cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
442 436
443 if (smp_num_siblings > 1) { 437 if (smp_num_siblings > 1) {
444 for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { 438 for_each_cpu(i, cpu_sibling_setup_mask) {
445 if (c->phys_proc_id == cpu_data(i).phys_proc_id && 439 struct cpuinfo_x86 *o = &cpu_data(i);
446 c->cpu_core_id == cpu_data(i).cpu_core_id) { 440
447 cpu_set(i, per_cpu(cpu_sibling_map, cpu)); 441 if (c->phys_proc_id == o->phys_proc_id &&
448 cpu_set(cpu, per_cpu(cpu_sibling_map, i)); 442 c->cpu_core_id == o->cpu_core_id) {
449 cpu_set(i, per_cpu(cpu_core_map, cpu)); 443 cpumask_set_cpu(i, cpu_sibling_mask(cpu));
450 cpu_set(cpu, per_cpu(cpu_core_map, i)); 444 cpumask_set_cpu(cpu, cpu_sibling_mask(i));
451 cpu_set(i, c->llc_shared_map); 445 cpumask_set_cpu(i, cpu_core_mask(cpu));
452 cpu_set(cpu, cpu_data(i).llc_shared_map); 446 cpumask_set_cpu(cpu, cpu_core_mask(i));
447 cpumask_set_cpu(i, &c->llc_shared_map);
448 cpumask_set_cpu(cpu, &o->llc_shared_map);
453 } 449 }
454 } 450 }
455 } else { 451 } else {
456 cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); 452 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
457 } 453 }
458 454
459 cpu_set(cpu, c->llc_shared_map); 455 cpumask_set_cpu(cpu, &c->llc_shared_map);
460 456
461 if (current_cpu_data.x86_max_cores == 1) { 457 if (current_cpu_data.x86_max_cores == 1) {
462 per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); 458 cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
463 c->booted_cores = 1; 459 c->booted_cores = 1;
464 return; 460 return;
465 } 461 }
466 462
467 for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { 463 for_each_cpu(i, cpu_sibling_setup_mask) {
468 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && 464 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
469 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { 465 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
470 cpu_set(i, c->llc_shared_map); 466 cpumask_set_cpu(i, &c->llc_shared_map);
471 cpu_set(cpu, cpu_data(i).llc_shared_map); 467 cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
472 } 468 }
473 if (c->phys_proc_id == cpu_data(i).phys_proc_id) { 469 if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
474 cpu_set(i, per_cpu(cpu_core_map, cpu)); 470 cpumask_set_cpu(i, cpu_core_mask(cpu));
475 cpu_set(cpu, per_cpu(cpu_core_map, i)); 471 cpumask_set_cpu(cpu, cpu_core_mask(i));
476 /* 472 /*
477 * Does this new cpu bring up a new core? 473
478 */ 474 */
479 if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { 475 if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
480 /* 476 /*
481 * for each core in package, increment 477 * for each core in package, increment
482 * the booted_cores for this new cpu 478 * the booted_cores for this new cpu
483 */ 479 */
484 if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) 480 if (cpumask_first(cpu_sibling_mask(i)) == i)
485 c->booted_cores++; 481 c->booted_cores++;
486 /* 482 /*
487 * increment the core count for all 483 * increment the core count for all
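A note on the set_cpu_sibling_map() hunk above: cpu_sibling_mask(cpu) and cpu_core_mask(cpu) are accessors returning a struct cpumask * into the per-cpu topology maps, so the core-counting tests read directly off them. The two idioms the hunk relies on, as a sketch (illustrative only; the helper names are hypothetical):

#include <linux/types.h>
#include <linux/cpumask.h>

static bool example_counts_as_new_core(int cpu)
{
	/* a core is new when the incoming CPU is its only sibling so far */
	return cpumask_weight(cpu_sibling_mask(cpu)) == 1;
}

static bool example_is_core_representative(int cpu)
{
	/* each core is counted once, via its lowest-numbered sibling */
	return cpumask_first(cpu_sibling_mask(cpu)) == cpu;
}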
@@ -504,7 +500,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
504 * And for power savings, we return cpu_core_map 500 * And for power savings, we return cpu_core_map
505 */ 501 */
506 if (sched_mc_power_savings || sched_smt_power_savings) 502 if (sched_mc_power_savings || sched_smt_power_savings)
507 return &per_cpu(cpu_core_map, cpu); 503 return cpu_core_mask(cpu);
508 else 504 else
509 return &c->llc_shared_map; 505 return &c->llc_shared_map;
510} 506}
@@ -523,7 +519,7 @@ static void impress_friends(void)
523 */ 519 */
524 pr_debug("Before bogomips.\n"); 520 pr_debug("Before bogomips.\n");
525 for_each_possible_cpu(cpu) 521 for_each_possible_cpu(cpu)
526 if (cpu_isset(cpu, cpu_callout_map)) 522 if (cpumask_test_cpu(cpu, cpu_callout_mask))
527 bogosum += cpu_data(cpu).loops_per_jiffy; 523 bogosum += cpu_data(cpu).loops_per_jiffy;
528 printk(KERN_INFO 524 printk(KERN_INFO
529 "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", 525 "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
@@ -904,19 +900,19 @@ do_rest:
904 * allow APs to start initializing. 900 * allow APs to start initializing.
905 */ 901 */
906 pr_debug("Before Callout %d.\n", cpu); 902 pr_debug("Before Callout %d.\n", cpu);
907 cpu_set(cpu, cpu_callout_map); 903 cpumask_set_cpu(cpu, cpu_callout_mask);
908 pr_debug("After Callout %d.\n", cpu); 904 pr_debug("After Callout %d.\n", cpu);
909 905
910 /* 906 /*
911 * Wait 5s total for a response 907 * Wait 5s total for a response
912 */ 908 */
913 for (timeout = 0; timeout < 50000; timeout++) { 909 for (timeout = 0; timeout < 50000; timeout++) {
914 if (cpu_isset(cpu, cpu_callin_map)) 910 if (cpumask_test_cpu(cpu, cpu_callin_mask))
915 break; /* It has booted */ 911 break; /* It has booted */
916 udelay(100); 912 udelay(100);
917 } 913 }
918 914
919 if (cpu_isset(cpu, cpu_callin_map)) { 915 if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
920 /* number CPUs logically, starting from 1 (BSP is 0) */ 916 /* number CPUs logically, starting from 1 (BSP is 0) */
921 pr_debug("OK.\n"); 917 pr_debug("OK.\n");
922 printk(KERN_INFO "CPU%d: ", cpu); 918 printk(KERN_INFO "CPU%d: ", cpu);
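The wait loop above polls every 100 microseconds for up to 50000 iterations, i.e. 5 seconds total. Factored out as a sketch (wait_for_callin() is a hypothetical helper, not in the patch; it assumes cpu_callin_mask is in scope):

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/delay.h>

static bool wait_for_callin(int cpu)
{
	int timeout;

	for (timeout = 0; timeout < 50000; timeout++) {	/* 50000 * 100us = 5s */
		if (cpumask_test_cpu(cpu, cpu_callin_mask))
			return true;	/* the AP has booted */
		udelay(100);
	}
	return false;	/* caller treats this as a boot failure */
}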
@@ -941,9 +937,14 @@ restore_state:
941 if (boot_error) { 937 if (boot_error) {
942 /* Try to put things back the way they were before ... */ 938 /* Try to put things back the way they were before ... */
943 numa_remove_cpu(cpu); /* was set by numa_add_cpu */ 939 numa_remove_cpu(cpu); /* was set by numa_add_cpu */
944 cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */ 940
945 cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ 941 /* was set by do_boot_cpu() */
946 cpu_clear(cpu, cpu_present_map); 942 cpumask_clear_cpu(cpu, cpu_callout_mask);
943
944 /* was set by cpu_init() */
945 cpumask_clear_cpu(cpu, cpu_initialized_mask);
946
947 set_cpu_present(cpu, false);
947 per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; 948 per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
948 } 949 }
949 950
@@ -977,7 +978,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
977 /* 978 /*
978 * Already booted CPU? 979 * Already booted CPU?
979 */ 980 */
980 if (cpu_isset(cpu, cpu_callin_map)) { 981 if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
981 pr_debug("do_boot_cpu %d Already started\n", cpu); 982 pr_debug("do_boot_cpu %d Already started\n", cpu);
982 return -ENOSYS; 983 return -ENOSYS;
983 } 984 }
@@ -1032,8 +1033,9 @@ int __cpuinit native_cpu_up(unsigned int cpu)
1032 */ 1033 */
1033static __init void disable_smp(void) 1034static __init void disable_smp(void)
1034{ 1035{
1035 cpu_present_map = cpumask_of_cpu(0); 1036 /* use the read/write pointers to the present and possible maps */
1036 cpu_possible_map = cpumask_of_cpu(0); 1037 cpumask_copy(&cpu_present_map, cpumask_of(0));
1038 cpumask_copy(&cpu_possible_map, cpumask_of(0));
1037 smpboot_clear_io_apic_irqs(); 1039 smpboot_clear_io_apic_irqs();
1038 1040
1039 if (smp_found_config) 1041 if (smp_found_config)
@@ -1041,8 +1043,8 @@ static __init void disable_smp(void)
1041 else 1043 else
1042 physid_set_mask_of_physid(0, &phys_cpu_present_map); 1044 physid_set_mask_of_physid(0, &phys_cpu_present_map);
1043 map_cpu_to_logical_apicid(); 1045 map_cpu_to_logical_apicid();
1044 cpu_set(0, per_cpu(cpu_sibling_map, 0)); 1046 cpumask_set_cpu(0, cpu_sibling_mask(0));
1045 cpu_set(0, per_cpu(cpu_core_map, 0)); 1047 cpumask_set_cpu(0, cpu_core_mask(0));
1046} 1048}
1047 1049
1048/* 1050/*
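The disable_smp() hunk above replaces struct assignment from cpumask_of_cpu(0) with cpumask_copy() from cpumask_of(0), since cpumask_of() returns a const struct cpumask * rather than a value. Sketch of the idiom (limit_map_to_boot_cpu() is a hypothetical name):

#include <linux/cpumask.h>

static void limit_map_to_boot_cpu(struct cpumask *map)
{
	/* cpumask_of(0) is a const single-bit mask; copy it into a writable map */
	cpumask_copy(map, cpumask_of(0));
}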
@@ -1064,14 +1066,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
1064 nr = 0; 1066 nr = 0;
1065 for_each_present_cpu(cpu) { 1067 for_each_present_cpu(cpu) {
1066 if (nr >= 8) 1068 if (nr >= 8)
1067 cpu_clear(cpu, cpu_present_map); 1069 set_cpu_present(cpu, false);
1068 nr++; 1070 nr++;
1069 } 1071 }
1070 1072
1071 nr = 0; 1073 nr = 0;
1072 for_each_possible_cpu(cpu) { 1074 for_each_possible_cpu(cpu) {
1073 if (nr >= 8) 1075 if (nr >= 8)
1074 cpu_clear(cpu, cpu_possible_map); 1076 set_cpu_possible(cpu, false);
1075 nr++; 1077 nr++;
1076 } 1078 }
1077 1079
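The sanity-check hunk above switches from clearing bits in cpu_present_map/cpu_possible_map directly to the set_cpu_present()/set_cpu_possible() accessors, which keep the maps private to core code. The capping pattern it uses, as a sketch (example_cap_present_cpus() is hypothetical):

#include <linux/cpumask.h>

static void example_cap_present_cpus(int limit)
{
	int cpu, nr = 0;

	for_each_present_cpu(cpu)
		if (nr++ >= limit)
			set_cpu_present(cpu, false);	/* was cpu_clear() on the map */
}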
@@ -1167,7 +1169,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1167 preempt_disable(); 1169 preempt_disable();
1168 smp_cpu_index_default(); 1170 smp_cpu_index_default();
1169 current_cpu_data = boot_cpu_data; 1171 current_cpu_data = boot_cpu_data;
1170 cpu_callin_map = cpumask_of_cpu(0); 1172 cpumask_copy(cpu_callin_mask, cpumask_of(0));
1171 mb(); 1173 mb();
1172 /* 1174 /*
1173 * Setup boot CPU information 1175 * Setup boot CPU information
@@ -1242,8 +1244,8 @@ void __init native_smp_prepare_boot_cpu(void)
1242 init_gdt(me); 1244 init_gdt(me);
1243#endif 1245#endif
1244 switch_to_new_gdt(); 1246 switch_to_new_gdt();
1245 /* already set me in cpu_online_map in boot_cpu_init() */ 1247 /* already set me in cpu_online_mask in boot_cpu_init() */
1246 cpu_set(me, cpu_callout_map); 1248 cpumask_set_cpu(me, cpu_callout_mask);
1247 per_cpu(cpu_state, me) = CPU_ONLINE; 1249 per_cpu(cpu_state, me) = CPU_ONLINE;
1248} 1250}
1249 1251
@@ -1311,7 +1313,7 @@ __init void prefill_possible_map(void)
1311 possible, max_t(int, possible - num_processors, 0)); 1313 possible, max_t(int, possible - num_processors, 0));
1312 1314
1313 for (i = 0; i < possible; i++) 1315 for (i = 0; i < possible; i++)
1314 cpu_set(i, cpu_possible_map); 1316 set_cpu_possible(i, true);
1315 1317
1316 nr_cpu_ids = possible; 1318 nr_cpu_ids = possible;
1317} 1319}
@@ -1323,31 +1325,31 @@ static void remove_siblinginfo(int cpu)
1323 int sibling; 1325 int sibling;
1324 struct cpuinfo_x86 *c = &cpu_data(cpu); 1326 struct cpuinfo_x86 *c = &cpu_data(cpu);
1325 1327
1326 for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) { 1328 for_each_cpu(sibling, cpu_core_mask(cpu)) {
1327 cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); 1329 cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
1328 /* 1330 /*
1329 * last thread sibling in this cpu core going down 1331 * last thread sibling in this cpu core going down
1330 */ 1332 */
1331 if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) 1333 if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
1332 cpu_data(sibling).booted_cores--; 1334 cpu_data(sibling).booted_cores--;
1333 } 1335 }
1334 1336
1335 for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu)) 1337 for_each_cpu(sibling, cpu_sibling_mask(cpu))
1336 cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); 1338 cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
1337 cpus_clear(per_cpu(cpu_sibling_map, cpu)); 1339 cpumask_clear(cpu_sibling_mask(cpu));
1338 cpus_clear(per_cpu(cpu_core_map, cpu)); 1340 cpumask_clear(cpu_core_mask(cpu));
1339 c->phys_proc_id = 0; 1341 c->phys_proc_id = 0;
1340 c->cpu_core_id = 0; 1342 c->cpu_core_id = 0;
1341 cpu_clear(cpu, cpu_sibling_setup_map); 1343 cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1342} 1344}
1343 1345
1344static void __ref remove_cpu_from_maps(int cpu) 1346static void __ref remove_cpu_from_maps(int cpu)
1345{ 1347{
1346 cpu_clear(cpu, cpu_online_map); 1348 set_cpu_online(cpu, false);
1347 cpu_clear(cpu, cpu_callout_map); 1349 cpumask_clear_cpu(cpu, cpu_callout_mask);
1348 cpu_clear(cpu, cpu_callin_map); 1350 cpumask_clear_cpu(cpu, cpu_callin_mask);
1349 /* was set by cpu_init() */ 1351 /* was set by cpu_init() */
1350 cpu_clear(cpu, cpu_initialized); 1352 cpumask_clear_cpu(cpu, cpu_initialized_mask);
1351 numa_remove_cpu(cpu); 1353 numa_remove_cpu(cpu);
1352} 1354}
1353 1355
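Taken together, the last two hunks are the teardown mirror of the boot path: the CPU leaves the public online map first, so no new work is steered at it, and only then drops out of the handshake and init-tracking masks. The ordering, restated as a sketch (illustrative condensation of remove_cpu_from_maps(); assumes the x86 mask declarations are in scope):

#include <linux/cpumask.h>

static void example_mark_offline(int cpu)
{
	set_cpu_online(cpu, false);			/* first: visible to schedulers/IPIs */
	cpumask_clear_cpu(cpu, cpu_callout_mask);	/* then: boot-handshake state */
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	cpumask_clear_cpu(cpu, cpu_initialized_mask);	/* was set by cpu_init() */
}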
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 65309e4cb1c..3985cac0ed4 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -105,8 +105,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
105 high bit of the PPI port B (0x61). Note that some PS/2s, 105 high bit of the PPI port B (0x61). Note that some PS/2s,
106 notably the 55SX, work fine if this is removed. */ 106 notably the 55SX, work fine if this is removed. */
107 107
108 u8 irq_v = inb_p( 0x61 ); /* read the current state */ 108 u8 irq_v = inb_p(0x61); /* read the current state */
109 outb_p( irq_v|0x80, 0x61 ); /* reset the IRQ */ 109 outb_p(irq_v | 0x80, 0x61); /* reset the IRQ */
110 } 110 }
111#endif 111#endif
112 112
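The time_32.c hunk is purely a coding-style cleanup (kernel style drops the spaces inside parentheses), but the code it touches is worth a gloss: on PS/2-class machines the timer interrupt is acknowledged by pulsing bit 7 of I/O port 0x61, the 8255 PPI port B. As a sketch (ack_ps2_timer_irq() is a hypothetical name):

#include <linux/types.h>
#include <asm/io.h>

static void ack_ps2_timer_irq(void)
{
	u8 v = inb_p(0x61);	/* read current PPI port B state */

	outb_p(v | 0x80, 0x61);	/* write it back with bit 7 set to clear the IRQ */
}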
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index 891e7a7c433..e6e695acd72 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -17,10 +17,10 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/time.h> 18#include <linux/time.h>
19#include <linux/mca.h> 19#include <linux/mca.h>
20#include <linux/nmi.h>
20 21
21#include <asm/i8253.h> 22#include <asm/i8253.h>
22#include <asm/hpet.h> 23#include <asm/hpet.h>
23#include <asm/nmi.h>
24#include <asm/vgtod.h> 24#include <asm/vgtod.h>
25#include <asm/time.h> 25#include <asm/time.h>
26#include <asm/timer.h> 26#include <asm/timer.h>
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ce6650eb64e..98c2d055284 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -20,7 +20,6 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <linux/string.h> 22#include <linux/string.h>
23#include <linux/unwind.h>
24#include <linux/delay.h> 23#include <linux/delay.h>
25#include <linux/errno.h> 24#include <linux/errno.h>
26#include <linux/kexec.h> 25#include <linux/kexec.h>
@@ -51,7 +50,6 @@
51#include <asm/debugreg.h> 50#include <asm/debugreg.h>
52#include <asm/atomic.h> 51#include <asm/atomic.h>
53#include <asm/system.h> 52#include <asm/system.h>
54#include <asm/unwind.h>
55#include <asm/traps.h> 53#include <asm/traps.h>
56#include <asm/desc.h> 54#include <asm/desc.h>
57#include <asm/i387.h> 55#include <asm/i387.h>
@@ -65,9 +63,6 @@
65#else 63#else
66#include <asm/processor-flags.h> 64#include <asm/processor-flags.h>
67#include <asm/arch_hooks.h> 65#include <asm/arch_hooks.h>
68#include <asm/nmi.h>
69#include <asm/smp.h>
70#include <asm/io.h>
71#include <asm/traps.h> 66#include <asm/traps.h>
72 67
73#include "cpu/mcheck/mce.h" 68#include "cpu/mcheck/mce.h"
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 0c9667f0752..d801d06af06 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -176,33 +176,31 @@ static int __init visws_get_smp_config(unsigned int early)
176 * No problem for Linux. 176 * No problem for Linux.
177 */ 177 */
178 178
179static void __init MP_processor_info(struct mpc_config_processor *m) 179static void __init MP_processor_info(struct mpc_cpu *m)
180{ 180{
181 int ver, logical_apicid; 181 int ver, logical_apicid;
182 physid_mask_t apic_cpus; 182 physid_mask_t apic_cpus;
183 183
184 if (!(m->mpc_cpuflag & CPU_ENABLED)) 184 if (!(m->cpuflag & CPU_ENABLED))
185 return; 185 return;
186 186
187 logical_apicid = m->mpc_apicid; 187 logical_apicid = m->apicid;
188 printk(KERN_INFO "%sCPU #%d %u:%u APIC version %d\n", 188 printk(KERN_INFO "%sCPU #%d %u:%u APIC version %d\n",
189 m->mpc_cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "", 189 m->cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "",
190 m->mpc_apicid, 190 m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
191 (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, 191 (m->cpufeature & CPU_MODEL_MASK) >> 4, m->apicver);
192 (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
193 m->mpc_apicver);
194 192
195 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) 193 if (m->cpuflag & CPU_BOOTPROCESSOR)
196 boot_cpu_physical_apicid = m->mpc_apicid; 194 boot_cpu_physical_apicid = m->apicid;
197 195
198 ver = m->mpc_apicver; 196 ver = m->apicver;
199 if ((ver >= 0x14 && m->mpc_apicid >= 0xff) || m->mpc_apicid >= 0xf) { 197 if ((ver >= 0x14 && m->apicid >= 0xff) || m->apicid >= 0xf) {
200 printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n", 198 printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
201 m->mpc_apicid, MAX_APICS); 199 m->apicid, MAX_APICS);
202 return; 200 return;
203 } 201 }
204 202
205 apic_cpus = apicid_to_cpu_present(m->mpc_apicid); 203 apic_cpus = apicid_to_cpu_present(m->apicid);
206 physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); 204 physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus);
207 /* 205 /*
208 * Validate version 206 * Validate version
@@ -210,15 +208,15 @@ static void __init MP_processor_info(struct mpc_config_processor *m)
210 if (ver == 0x0) { 208 if (ver == 0x0) {
211 printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! " 209 printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! "
212 "fixing up to 0x10. (tell your hw vendor)\n", 210 "fixing up to 0x10. (tell your hw vendor)\n",
213 m->mpc_apicid); 211 m->apicid);
214 ver = 0x10; 212 ver = 0x10;
215 } 213 }
216 apic_version[m->mpc_apicid] = ver; 214 apic_version[m->apicid] = ver;
217} 215}
218 216
219static int __init visws_find_smp_config(unsigned int reserve) 217static int __init visws_find_smp_config(unsigned int reserve)
220{ 218{
221 struct mpc_config_processor *mp = phys_to_virt(CO_CPU_TAB_PHYS); 219 struct mpc_cpu *mp = phys_to_virt(CO_CPU_TAB_PHYS);
222 unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS)); 220 unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS));
223 221
224 if (ncpus > CO_CPU_MAX) { 222 if (ncpus > CO_CPU_MAX) {