author    Linus Torvalds <torvalds@linux-foundation.org>  2008-04-21 18:40:24 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-04-21 18:40:24 -0400
commit    ec965350bb98bd291eb34f6ecddfdcfc36da1e6e (patch)
tree      983bcaf33ed00b48a86f7f8790cc460cf15dd252 /arch/x86
parent    5f033bb9bc5cb3bb37a79e3ef131f50ecdcb72b0 (diff)
parent    486fdae21458bd9f4e125099bb3c38a4064e450e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel: (62 commits)
  sched: build fix
  sched: better rt-group documentation
  sched: features fix
  sched: /debug/sched_features
  sched: add SCHED_FEAT_DEADLINE
  sched: debug: show a weight tree
  sched: fair: weight calculations
  sched: fair-group: de-couple load-balancing from the rb-trees
  sched: fair-group scheduling vs latency
  sched: rt-group: optimize dequeue_rt_stack
  sched: debug: add some debug code to handle the full hierarchy
  sched: fair-group: SMP-nice for group scheduling
  sched, cpuset: customize sched domains, core
  sched, cpuset: customize sched domains, docs
  sched: prepatory code movement
  sched: rt: multi level group constraints
  sched: task_group hierarchy
  sched: fix the task_group hierarchy for UID grouping
  sched: allow the group scheduler to have multiple levels
  sched: mix tasks and groups
  ...
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                                  |  3
-rw-r--r--  arch/x86/kernel/acpi/cstate.c                     |  4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c        | 28
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c         | 32
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c  | 13
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c       | 20
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c             | 92
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c           | 46
-rw-r--r--  arch/x86/kernel/io_apic_64.c                      |  2
-rw-r--r--  arch/x86/kernel/microcode.c                       | 16
-rw-r--r--  arch/x86/kernel/reboot.c                          |  2
-rw-r--r--  arch/x86/kernel/setup.c                           | 28
-rw-r--r--  arch/x86/mm/numa_64.c                             |  3
-rw-r--r--  arch/x86/oprofile/nmi_int.c                       | 49
14 files changed, 201 insertions(+), 137 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 07cf77113565..87a693cf2bb7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -117,6 +117,9 @@ config ARCH_HAS_CPU_RELAX
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool X86_64 || (X86_SMP && !X86_VOYAGER)
 
+config HAVE_CPUMASK_OF_CPU_MAP
+	def_bool X86_64_SMP
+
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y
 	depends on !SMP || !X86_VOYAGER
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 9366fb68d8d8..c2502eb9aa83 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -91,7 +91,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 
 	/* Make sure we are running on right CPU */
 	saved_mask = current->cpus_allowed;
-	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (retval)
 		return -1;
 
@@ -128,7 +128,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 		 cx->address);
 
 out:
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return retval;
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
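The two hunks above are the template for most of this merge's arch/x86 conversions: set_cpus_allowed() took a cpumask_t by value, copying a NR_CPUS-bit structure on every call, while set_cpus_allowed_ptr() takes a pointer to a mask the caller keeps. A minimal sketch of the save/pin/restore idiom as these hunks use it, assuming a kernel of this era where cpumask_of_cpu() yields an addressable mask; do_work_on_cpu() is a hypothetical caller, not part of the patch:

/* Hypothetical illustration of the affinity save/pin/restore idiom. */
static int do_work_on_cpu(unsigned int cpu)
{
	cpumask_t saved_mask = current->cpus_allowed;	/* remember old affinity */
	int retval;

	/* Pin to one CPU; pass a pointer, not a NR_CPUS-bit copy. */
	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	if (retval)
		return retval;

	/* ...strictly per-CPU work (MSR access etc.) happens here... */

	/* Restore the caller's affinity before returning. */
	set_cpus_allowed_ptr(current, &saved_mask);
	return 0;
}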
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index a962dcb9c408..e2d870de837c 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -192,9 +192,9 @@ static void drv_read(struct drv_cmd *cmd)
 	cpumask_t saved_mask = current->cpus_allowed;
 	cmd->val = 0;
 
-	set_cpus_allowed(current, cmd->mask);
+	set_cpus_allowed_ptr(current, &cmd->mask);
 	do_drv_read(cmd);
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 }
 
 static void drv_write(struct drv_cmd *cmd)
@@ -203,30 +203,30 @@ static void drv_write(struct drv_cmd *cmd)
 	unsigned int i;
 
 	for_each_cpu_mask(i, cmd->mask) {
-		set_cpus_allowed(current, cpumask_of_cpu(i));
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 		do_drv_write(cmd);
 	}
 
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return;
 }
 
-static u32 get_cur_val(cpumask_t mask)
+static u32 get_cur_val(const cpumask_t *mask)
 {
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
 
-	if (unlikely(cpus_empty(mask)))
+	if (unlikely(cpus_empty(*mask)))
 		return 0;
 
-	switch (per_cpu(drv_data, first_cpu(mask))->cpu_feature) {
+	switch (per_cpu(drv_data, first_cpu(*mask))->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(drv_data, first_cpu(mask))->acpi_data;
+		perf = per_cpu(drv_data, first_cpu(*mask))->acpi_data;
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -234,7 +234,7 @@ static u32 get_cur_val(cpumask_t mask)
 		return 0;
 	}
 
-	cmd.mask = mask;
+	cmd.mask = *mask;
 
 	drv_read(&cmd);
 
@@ -271,7 +271,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 	unsigned int retval;
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (get_cpu() != cpu) {
 		/* We were not able to run on requested processor */
 		put_cpu();
@@ -329,7 +329,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 	retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100;
 
 	put_cpu();
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 
 	dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
 	return retval;
@@ -347,13 +347,13 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 		return 0;
 	}
 
-	freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
+	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
 	dprintk("cur freq = %u\n", freq);
 
 	return freq;
 }
 
-static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
+static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
 				struct acpi_cpufreq_data *data)
 {
 	unsigned int cur_freq;
@@ -449,7 +449,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	drv_write(&cmd);
 
 	if (acpi_pstate_strict) {
-		if (!check_freqs(cmd.mask, freqs.new, data)) {
+		if (!check_freqs(&cmd.mask, freqs.new, data)) {
 			dprintk("acpi_cpufreq_target failed (%d)\n",
 				policy->cpu);
 			return -EAGAIN;
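Beyond the call sites, the hunks above also push the by-pointer convention into helper signatures: get_cur_val() and check_freqs() now take const cpumask_t * and dereference it wherever the older mask APIs still want a value. A sketch of the pattern, assuming the 2.6.25-era cpumask primitives; count_mask_cpus() is a made-up helper, not from the patch:

/* Hypothetical helper showing the const-pointer parameter style. */
static unsigned int count_mask_cpus(const cpumask_t *mask)
{
	if (unlikely(cpus_empty(*mask)))	/* dereference for by-value APIs */
		return 0;
	return cpus_weight(*mask);		/* number of set bits */
}

Call sites then pass an address (e.g. count_mask_cpus(&policy->cpus)) instead of copying the whole mask onto the stack.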
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index c99d59d8ef2e..46d4034d9f37 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -478,12 +478,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
 
 static int check_supported_cpu(unsigned int cpu)
 {
-	cpumask_t oldmask = CPU_MASK_ALL;
+	cpumask_t oldmask;
 	u32 eax, ebx, ecx, edx;
 	unsigned int rc = 0;
 
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -528,7 +528,7 @@ static int check_supported_cpu(unsigned int cpu)
 	rc = 1;
 
 out:
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	return rc;
 }
 
@@ -1015,7 +1015,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 /* Driver entry point to switch to the target frequency */
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
-	cpumask_t oldmask = CPU_MASK_ALL;
+	cpumask_t oldmask;
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;
@@ -1030,7 +1030,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1085,7 +1085,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 	ret = 0;
 
 err_out:
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	return ret;
 }
 
@@ -1104,7 +1104,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
-	cpumask_t oldmask = CPU_MASK_ALL;
+	cpumask_t oldmask;
 	int rc;
 
 	if (!cpu_online(pol->cpu))
@@ -1145,7 +1145,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1164,7 +1164,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	fidvid_msr_init();
 
 	/* run on any CPU again */
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 
 	if (cpu_family == CPU_HW_PSTATE)
 		pol->cpus = cpumask_of_cpu(pol->cpu);
@@ -1205,7 +1205,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	return 0;
 
 err_out:
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	powernow_k8_cpu_exit_acpi(data);
 
 	kfree(data);
@@ -1242,10 +1242,11 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	if (!data)
 		return -EINVAL;
 
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu) {
-		printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu);
-		set_cpus_allowed(current, oldmask);
+		printk(KERN_ERR PFX
+		       "limiting to CPU %d failed in powernowk8_get\n", cpu);
+		set_cpus_allowed_ptr(current, &oldmask);
 		return 0;
 	}
 
@@ -1253,13 +1254,14 @@ static unsigned int powernowk8_get (unsigned int cpu)
 		goto out;
 
 	if (cpu_family == CPU_HW_PSTATE)
-		khz = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
+		khz = find_khz_freq_from_pstate(data->powernow_table,
+						data->currpstate);
 	else
 		khz = find_khz_freq_from_fid(data->currfid);
 
 
 out:
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	return khz;
 }
 
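A second cleanup rides along in powernow-k8.c: the "= CPU_MASK_ALL" initializers go away because every path assigns current->cpus_allowed to the mask before it is read, so the initializer was a dead store that still had to write a full NR_CPUS-bit mask. A compressed sketch of the before/after, under the same assumption that the first use is a write; pin_and_probe() is a made-up condensed form, not patch code:

/* Hypothetical condensed form of the pattern the hunks apply. */
static int pin_and_probe(unsigned int cpu)
{
	cpumask_t oldmask;			/* was: = CPU_MASK_ALL (dead store) */
	int rc = 0;

	oldmask = current->cpus_allowed;	/* first real use overwrites it */
	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	if (smp_processor_id() != cpu)
		rc = -EIO;			/* migration did not happen */
	set_cpus_allowed_ptr(current, &oldmask);
	return rc;
}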
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 3031f1196192..908dd347c67e 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -315,7 +315,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
 	cpumask_t saved_mask;
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu)
 		return 0;
 
@@ -333,7 +333,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
 		clock_freq = extract_clock(l, cpu, 1);
 	}
 
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return clock_freq;
 }
 
@@ -487,7 +487,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		else
 			cpu_set(j, set_mask);
 
-		set_cpus_allowed(current, set_mask);
+		set_cpus_allowed_ptr(current, &set_mask);
 		preempt_disable();
 		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
 			dprintk("couldn't limit to CPUs in this domain\n");
@@ -555,7 +555,8 @@ static int centrino_target (struct cpufreq_policy *policy,
 
 		if (!cpus_empty(covered_cpus)) {
 			for_each_cpu_mask(j, covered_cpus) {
-				set_cpus_allowed(current, cpumask_of_cpu(j));
+				set_cpus_allowed_ptr(current,
+						     &cpumask_of_cpu(j));
 				wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
 			}
 		}
@@ -569,12 +570,12 @@ static int centrino_target (struct cpufreq_policy *policy,
 			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 		}
 	}
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return 0;
 
 migrate_end:
 	preempt_enable();
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return 0;
 }
580 581
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 14d68aa301ee..1b50244b1fdf 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -229,22 +229,22 @@ static unsigned int speedstep_detect_chipset (void)
 	return 0;
 }
 
-static unsigned int _speedstep_get(cpumask_t cpus)
+static unsigned int _speedstep_get(const cpumask_t *cpus)
 {
 	unsigned int speed;
 	cpumask_t cpus_allowed;
 
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpus);
+	set_cpus_allowed_ptr(current, cpus);
 	speed = speedstep_get_processor_frequency(speedstep_processor);
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 	dprintk("detected %u kHz as current frequency\n", speed);
 	return speed;
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	return _speedstep_get(cpumask_of_cpu(cpu));
+	return _speedstep_get(&cpumask_of_cpu(cpu));
 }
 
 /**
@@ -267,7 +267,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
 		return -EINVAL;
 
-	freqs.old = _speedstep_get(policy->cpus);
+	freqs.old = _speedstep_get(&policy->cpus);
 	freqs.new = speedstep_freqs[newstate].frequency;
 	freqs.cpu = policy->cpu;
 
@@ -285,12 +285,12 @@ static int speedstep_target (struct cpufreq_policy *policy,
 	}
 
 	/* switch to physical CPU where state is to be changed */
-	set_cpus_allowed(current, policy->cpus);
+	set_cpus_allowed_ptr(current, &policy->cpus);
 
 	speedstep_set_state(newstate);
 
 	/* allow to be run on all CPUs */
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 
 	for_each_cpu_mask(i, policy->cpus) {
 		freqs.cpu = i;
@@ -326,7 +326,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 #endif
 
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, policy->cpus);
+	set_cpus_allowed_ptr(current, &policy->cpus);
 
 	/* detect low and high frequency and transition latency */
 	result = speedstep_get_freqs(speedstep_processor,
@@ -334,12 +334,12 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 				     &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
 				     &policy->cpuinfo.transition_latency,
 				     &speedstep_set_state);
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 	if (result)
 		return result;
 
 	/* get current speed setting */
-	speed = _speedstep_get(policy->cpus);
+	speed = _speedstep_get(&policy->cpus);
 	if (!speed)
 		return -EIO;
 
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 1b889860eb73..26d615dcb149 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -129,7 +129,7 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
-	cpumask_t shared_cpu_map;
+	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
 };
 
 unsigned short num_cache_leaves;
@@ -451,8 +451,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 }
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
-static struct _cpuid4_info *cpuid4_info[NR_CPUS];
-#define CPUID4_INFO_IDX(x,y)	(&((cpuid4_info[x])[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
+#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -474,7 +474,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 		if (cpu_data(i).apicid >> index_msb ==
 		    c->apicid >> index_msb) {
 			cpu_set(i, this_leaf->shared_cpu_map);
-			if (i != cpu && cpuid4_info[i]) {
+			if (i != cpu && per_cpu(cpuid4_info, i)) {
 				sibling_leaf = CPUID4_INFO_IDX(i, index);
 				cpu_set(cpu, sibling_leaf->shared_cpu_map);
 			}
@@ -505,8 +505,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	for (i = 0; i < num_cache_leaves; i++)
 		cache_remove_shared_cpu_map(cpu, i);
 
-	kfree(cpuid4_info[cpu]);
-	cpuid4_info[cpu] = NULL;
+	kfree(per_cpu(cpuid4_info, cpu));
+	per_cpu(cpuid4_info, cpu) = NULL;
 }
 
 static int __cpuinit detect_cache_attributes(unsigned int cpu)
@@ -519,13 +519,13 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	if (num_cache_leaves == 0)
 		return -ENOENT;
 
-	cpuid4_info[cpu] = kzalloc(
+	per_cpu(cpuid4_info, cpu) = kzalloc(
 	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (cpuid4_info[cpu] == NULL)
+	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return -ENOMEM;
 
 	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (retval)
 		goto out;
 
@@ -542,12 +542,12 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		}
 		cache_shared_cpu_map_setup(cpu, j);
 	}
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 
 out:
 	if (retval) {
-		kfree(cpuid4_info[cpu]);
-		cpuid4_info[cpu] = NULL;
+		kfree(per_cpu(cpuid4_info, cpu));
+		per_cpu(cpuid4_info, cpu) = NULL;
 	}
 
 	return retval;
@@ -561,7 +561,7 @@ out:
 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
 
 /* pointer to kobject for cpuX/cache */
-static struct kobject * cache_kobject[NR_CPUS];
+static DEFINE_PER_CPU(struct kobject *, cache_kobject);
 
 struct _index_kobject {
 	struct kobject kobj;
@@ -570,8 +570,8 @@ struct _index_kobject {
 };
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
-static struct _index_kobject *index_kobject[NR_CPUS];
-#define INDEX_KOBJECT_PTR(x,y)	(&((index_kobject[x])[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
+#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)				\
 static ssize_t show_##file_name						\
@@ -591,11 +591,32 @@ static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
 	return sprintf (buf, "%luK\n", this_leaf->size / 1024);
 }
 
-static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
+					int type, char *buf)
 {
-	char mask_str[NR_CPUS];
-	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
-	return sprintf(buf, "%s\n", mask_str);
+	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
+	int n = 0;
+
+	if (len > 1) {
+		cpumask_t *mask = &this_leaf->shared_cpu_map;
+
+		n = type?
+			cpulist_scnprintf(buf, len-2, *mask):
+			cpumask_scnprintf(buf, len-2, *mask);
+		buf[n++] = '\n';
+		buf[n] = '\0';
+	}
+	return n;
+}
+
+static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
+{
+	return show_shared_cpu_map_func(leaf, 0, buf);
+}
+
+static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
+{
+	return show_shared_cpu_map_func(leaf, 1, buf);
 }
 
 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
@@ -633,6 +654,7 @@ define_one_ro(ways_of_associativity);
 define_one_ro(number_of_sets);
 define_one_ro(size);
 define_one_ro(shared_cpu_map);
+define_one_ro(shared_cpu_list);
 
 static struct attribute * default_attrs[] = {
 	&type.attr,
@@ -643,6 +665,7 @@ static struct attribute * default_attrs[] = {
 	&number_of_sets.attr,
 	&size.attr,
 	&shared_cpu_map.attr,
+	&shared_cpu_list.attr,
 	NULL
 };
 
@@ -684,10 +707,10 @@ static struct kobj_type ktype_percpu_entry = {
 
 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
-	kfree(cache_kobject[cpu]);
-	kfree(index_kobject[cpu]);
-	cache_kobject[cpu] = NULL;
-	index_kobject[cpu] = NULL;
+	kfree(per_cpu(cache_kobject, cpu));
+	kfree(per_cpu(index_kobject, cpu));
+	per_cpu(cache_kobject, cpu) = NULL;
+	per_cpu(index_kobject, cpu) = NULL;
 	free_cache_attributes(cpu);
 }
 
@@ -703,13 +726,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 		return err;
 
 	/* Allocate all required memory */
-	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
-	if (unlikely(cache_kobject[cpu] == NULL))
+	per_cpu(cache_kobject, cpu) =
+		kzalloc(sizeof(struct kobject), GFP_KERNEL);
+	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
 		goto err_out;
 
-	index_kobject[cpu] = kzalloc(
+	per_cpu(index_kobject, cpu) = kzalloc(
 	    sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
-	if (unlikely(index_kobject[cpu] == NULL))
+	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
 		goto err_out;
 
 	return 0;
@@ -733,7 +757,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	if (unlikely(retval < 0))
 		return retval;
 
-	retval = kobject_init_and_add(cache_kobject[cpu], &ktype_percpu_entry,
+	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+				      &ktype_percpu_entry,
 				      &sys_dev->kobj, "%s", "cache");
 	if (retval < 0) {
 		cpuid4_cache_sysfs_exit(cpu);
@@ -745,13 +770,14 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		this_object->cpu = cpu;
 		this_object->index = i;
 		retval = kobject_init_and_add(&(this_object->kobj),
-					      &ktype_cache, cache_kobject[cpu],
+					      &ktype_cache,
+					      per_cpu(cache_kobject, cpu),
 					      "index%1lu", i);
 		if (unlikely(retval)) {
 			for (j = 0; j < i; j++) {
 				kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
 			}
-			kobject_put(cache_kobject[cpu]);
+			kobject_put(per_cpu(cache_kobject, cpu));
 			cpuid4_cache_sysfs_exit(cpu);
 			break;
 		}
@@ -760,7 +786,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	if (!retval)
 		cpu_set(cpu, cache_dev_map);
 
-	kobject_uevent(cache_kobject[cpu], KOBJ_ADD);
+	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
 	return retval;
 }
 
@@ -769,7 +795,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
 
-	if (cpuid4_info[cpu] == NULL)
+	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return;
 	if (!cpu_isset(cpu, cache_dev_map))
 		return;
@@ -777,7 +803,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
-	kobject_put(cache_kobject[cpu]);
+	kobject_put(per_cpu(cache_kobject, cpu));
 	cpuid4_cache_sysfs_exit(cpu);
 }
 
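The sysfs rework above stops formatting the mask into a NR_CPUS-char stack buffer and instead prints straight into the PAGE_SIZE buffer sysfs hands in, reserving two bytes for the trailing newline and NUL; the new shared_cpu_list attribute reuses the same function with cpulist_scnprintf(). A sketch of the bounded-print pattern, assuming this era's by-value cpumask_scnprintf()/cpulist_scnprintf(); show_mask_attr() is a hypothetical stand-in:

/* Hypothetical bounded sysfs mask printer. */
static ssize_t show_mask_attr(char *buf, const cpumask_t *mask, int as_list)
{
	/* Bytes left in the page-sized sysfs buffer we were handed. */
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		n = as_list ?
			cpulist_scnprintf(buf, len - 2, *mask) :	/* e.g. "0,3-5" */
			cpumask_scnprintf(buf, len - 2, *mask);		/* hex bitmap */
		buf[n++] = '\n';	/* the two reserved bytes */
		buf[n] = '\0';
	}
	return n;
}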
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 32671da8184e..7c9a813e1193 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -251,18 +251,18 @@ struct threshold_attr {
 	ssize_t(*store) (struct threshold_block *, const char *, size_t count);
 };
 
-static cpumask_t affinity_set(unsigned int cpu)
+static void affinity_set(unsigned int cpu, cpumask_t *oldmask,
+			 cpumask_t *newmask)
 {
-	cpumask_t oldmask = current->cpus_allowed;
-	cpumask_t newmask = CPU_MASK_NONE;
-	cpu_set(cpu, newmask);
-	set_cpus_allowed(current, newmask);
-	return oldmask;
+	*oldmask = current->cpus_allowed;
+	cpus_clear(*newmask);
+	cpu_set(cpu, *newmask);
+	set_cpus_allowed_ptr(current, newmask);
 }
 
-static void affinity_restore(cpumask_t oldmask)
+static void affinity_restore(const cpumask_t *oldmask)
 {
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, oldmask);
 }
 
 #define SHOW_FIELDS(name)						\
@@ -277,15 +277,15 @@ static ssize_t store_interrupt_enable(struct threshold_block *b,
 				      const char *buf, size_t count)
 {
 	char *end;
-	cpumask_t oldmask;
+	cpumask_t oldmask, newmask;
 	unsigned long new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
 		return -EINVAL;
 	b->interrupt_enable = !!new;
 
-	oldmask = affinity_set(b->cpu);
+	affinity_set(b->cpu, &oldmask, &newmask);
 	threshold_restart_bank(b, 0, 0);
-	affinity_restore(oldmask);
+	affinity_restore(&oldmask);
 
 	return end - buf;
 }
@@ -294,7 +294,7 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 				     const char *buf, size_t count)
 {
 	char *end;
-	cpumask_t oldmask;
+	cpumask_t oldmask, newmask;
 	u16 old;
 	unsigned long new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
@@ -306,9 +306,9 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 	old = b->threshold_limit;
 	b->threshold_limit = new;
 
-	oldmask = affinity_set(b->cpu);
+	affinity_set(b->cpu, &oldmask, &newmask);
 	threshold_restart_bank(b, 0, old);
-	affinity_restore(oldmask);
+	affinity_restore(&oldmask);
 
 	return end - buf;
 }
@@ -316,10 +316,10 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 static ssize_t show_error_count(struct threshold_block *b, char *buf)
 {
 	u32 high, low;
-	cpumask_t oldmask;
-	oldmask = affinity_set(b->cpu);
+	cpumask_t oldmask, newmask;
+	affinity_set(b->cpu, &oldmask, &newmask);
 	rdmsr(b->address, low, high);
-	affinity_restore(oldmask);
+	affinity_restore(&oldmask);
 	return sprintf(buf, "%x\n",
 		       (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
 }
@@ -327,10 +327,10 @@ static ssize_t show_error_count(struct threshold_block *b, char *buf)
 static ssize_t store_error_count(struct threshold_block *b,
 				 const char *buf, size_t count)
 {
-	cpumask_t oldmask;
-	oldmask = affinity_set(b->cpu);
+	cpumask_t oldmask, newmask;
+	affinity_set(b->cpu, &oldmask, &newmask);
 	threshold_restart_bank(b, 1, 0);
-	affinity_restore(oldmask);
+	affinity_restore(&oldmask);
 	return 1;
 }
 
@@ -468,7 +468,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
 	int i, err = 0;
 	struct threshold_bank *b = NULL;
-	cpumask_t oldmask = CPU_MASK_NONE;
+	cpumask_t oldmask, newmask;
 	char name[32];
 
 	sprintf(name, "threshold_bank%i", bank);
@@ -519,10 +519,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
 
-	oldmask = affinity_set(cpu);
+	affinity_set(cpu, &oldmask, &newmask);
 	err = allocate_threshold_blocks(cpu, bank, 0,
 					MSR_IA32_MC0_MISC + bank * 4);
-	affinity_restore(oldmask);
+	affinity_restore(&oldmask);
 
 	if (err)
 		goto out_free;
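affinity_set() previously returned the old cpumask_t by value; since on-stack and returned masks scale with NR_CPUS, it now fills two caller-owned masks through pointers, and affinity_restore() takes a const pointer. A sketch of how a caller uses the reworked pair; read_bank_high() is a hypothetical function for illustration only:

/* Hypothetical caller of the out-parameter affinity helpers. */
static u32 read_bank_high(struct threshold_block *b)
{
	cpumask_t oldmask, newmask;	/* caller provides the storage */
	u32 low, high;

	affinity_set(b->cpu, &oldmask, &newmask);	/* pin to b->cpu */
	rdmsr(b->address, low, high);			/* per-CPU MSR read */
	affinity_restore(&oldmask);			/* unpin */
	return high;
}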
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index b54464b26658..9ba11d07920f 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -785,7 +785,7 @@ static void __clear_irq_vector(int irq)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
-	cfg->domain = CPU_MASK_NONE;
+	cpus_clear(cfg->domain);
 }
 
 void __setup_vector_irq(int cpu)
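Same theme in the io_apic hunk: assigning CPU_MASK_NONE copies an entire mask-sized struct, while cpus_clear() zeroes the existing bitmap in place. A sketch, with reset_irq_domain() as a hypothetical wrapper (struct irq_cfg is the io_apic_64.c type the hunk touches):

/* Hypothetical wrapper around the in-place clear. */
static void reset_irq_domain(struct irq_cfg *cfg)
{
	cfg->vector = 0;
	cpus_clear(cfg->domain);	/* was: cfg->domain = CPU_MASK_NONE; (struct copy) */
}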
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 25cf6dee4e56..69729e38b78a 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -402,7 +402,7 @@ static int do_microcode_update (void)
 
 		if (!uci->valid)
 			continue;
-		set_cpus_allowed(current, cpumask_of_cpu(cpu));
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		error = get_maching_microcode(new_mc, cpu);
 		if (error < 0)
 			goto out;
@@ -416,7 +416,7 @@ out:
 		vfree(new_mc);
 	if (cursor < 0)
 		error = cursor;
-	set_cpus_allowed(current, old);
+	set_cpus_allowed_ptr(current, &old);
 	return error;
 }
 
@@ -579,7 +579,7 @@ static int apply_microcode_check_cpu(int cpu)
 		return 0;
 
 	old = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
 	/* Check if the microcode we have in memory matches the CPU */
 	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -610,7 +610,7 @@ static int apply_microcode_check_cpu(int cpu)
 		       " sig=0x%x, pf=0x%x, rev=0x%x\n",
 		       cpu, uci->sig, uci->pf, uci->rev);
 
-	set_cpus_allowed(current, old);
+	set_cpus_allowed_ptr(current, &old);
 	return err;
 }
 
@@ -621,13 +621,13 @@ static void microcode_init_cpu(int cpu, int resume)
 
 	old = current->cpus_allowed;
 
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	mutex_lock(&microcode_mutex);
 	collect_cpu_info(cpu);
 	if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
 		cpu_request_microcode(cpu);
 	mutex_unlock(&microcode_mutex);
-	set_cpus_allowed(current, old);
+	set_cpus_allowed_ptr(current, &old);
 }
 
 static void microcode_fini_cpu(int cpu)
@@ -657,14 +657,14 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz)
 		old = current->cpus_allowed;
 
 		get_online_cpus();
-		set_cpus_allowed(current, cpumask_of_cpu(cpu));
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
 		mutex_lock(&microcode_mutex);
 		if (uci->valid)
 			err = cpu_request_microcode(cpu);
 		mutex_unlock(&microcode_mutex);
 		put_online_cpus();
-		set_cpus_allowed(current, old);
+		set_cpus_allowed_ptr(current, &old);
 	}
 	if (err)
 		return err;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 9692202d3bfb..19c9386ac118 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -420,7 +420,7 @@ static void native_machine_shutdown(void)
 	reboot_cpu_id = smp_processor_id();
 
 	/* Make certain I only run on the appropriate processor */
-	set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
 
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ed157c90412e..0d1f44ae6eea 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -54,6 +54,24 @@ static void __init setup_per_cpu_maps(void)
 #endif
 }
 
+#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
+cpumask_t *cpumask_of_cpu_map __read_mostly;
+EXPORT_SYMBOL(cpumask_of_cpu_map);
+
+/* requires nr_cpu_ids to be initialized */
+static void __init setup_cpumask_of_cpu(void)
+{
+	int i;
+
+	/* alloc_bootmem zeroes memory */
+	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
+	for (i = 0; i < nr_cpu_ids; i++)
+		cpu_set(i, cpumask_of_cpu_map[i]);
+}
+#else
+static inline void setup_cpumask_of_cpu(void) { }
+#endif
+
 #ifdef CONFIG_X86_32
 /*
  * Great future not-so-futuristic plan: make i386 and x86_64 do it
@@ -70,7 +88,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
  */
 void __init setup_per_cpu_areas(void)
 {
-	int i;
+	int i, highest_cpu = 0;
 	unsigned long size;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -104,10 +122,18 @@ void __init setup_per_cpu_areas(void)
 		__per_cpu_offset[i] = ptr - __per_cpu_start;
 #endif
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+		highest_cpu = i;
 	}
 
+	nr_cpu_ids = highest_cpu + 1;
+	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d\n", NR_CPUS, nr_cpu_ids);
+
 	/* Setup percpu data maps */
 	setup_per_cpu_maps();
+
+	/* Setup cpumask_of_cpu map */
+	setup_cpumask_of_cpu();
 }
 
 #endif
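setup_per_cpu_areas() now records the highest CPU it set up and derives nr_cpu_ids from it, and setup_cpumask_of_cpu() then bootmem-allocates one single-bit cpumask_t per possible CPU. Presumably, via the companion cpumask changes elsewhere in this merge, cpumask_of_cpu() can then resolve to a stable array entry whose address is safe to hand to set_cpus_allowed_ptr(). A hypothetical boot-time sanity check of the invariant the map provides:

/* Hypothetical check: each map entry holds exactly its own CPU's bit. */
static void __init check_cpumask_of_cpu_map(void)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		BUG_ON(!cpu_isset(cpu, cpumask_of_cpu_map[cpu]));
		BUG_ON(cpus_weight(cpumask_of_cpu_map[cpu]) != 1);
	}
}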
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index cb3170186355..9a6892200b27 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -386,9 +386,10 @@ static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
  * Sets up the system RAM area from start_pfn to end_pfn according to the
  * numa=fake command-line option.
  */
+static struct bootnode nodes[MAX_NUMNODES] __initdata;
+
 static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
 {
-	struct bootnode nodes[MAX_NUMNODES];
 	u64 size, addr = start_pfn << PAGE_SHIFT;
 	u64 max_addr = end_pfn << PAGE_SHIFT;
 	int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;
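The numa_64.c hunk is a stack-usage fix: a MAX_NUMNODES-sized bootnode array is far too large for the kernel stack, so it moves to file scope tagged __initdata, which also lets the memory be reclaimed after init. A sketch of the general move, with made-up names and sizes:

/* Hypothetical illustration of moving a big __init scratch array off the stack. */
struct scratch_range { unsigned long start, end; };

#define SCRATCH_SLOTS 512	/* made-up; think MAX_NUMNODES-scale */

static struct scratch_range scratch[SCRATCH_SLOTS] __initdata;	/* was a local */

static int __init build_ranges(void)
{
	/* struct scratch_range scratch[SCRATCH_SLOTS]; <- would eat kilobytes of stack */
	scratch[0].start = 0;
	scratch[0].end = 0;
	return 0;
}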
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 1f11cf0a307f..cc48d3fde545 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -23,8 +23,8 @@
 #include "op_x86_model.h"
 
 static struct op_x86_model_spec const *model;
-static struct op_msrs cpu_msrs[NR_CPUS];
-static unsigned long saved_lvtpc[NR_CPUS];
+static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
+static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
 static int nmi_start(void);
 static void nmi_stop(void);
@@ -89,7 +89,7 @@ static int profile_exceptions_notify(struct notifier_block *self,
 
 	switch (val) {
 	case DIE_NMI:
-		if (model->check_ctrs(args->regs, &cpu_msrs[cpu]))
+		if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)))
 			ret = NOTIFY_STOP;
 		break;
 	default:
@@ -126,7 +126,7 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
 static void nmi_save_registers(void *dummy)
 {
 	int cpu = smp_processor_id();
-	struct op_msrs *msrs = &cpu_msrs[cpu];
+	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
 	nmi_cpu_save_registers(msrs);
 }
 
@@ -134,10 +134,10 @@ static void free_msrs(void)
 {
 	int i;
 	for_each_possible_cpu(i) {
-		kfree(cpu_msrs[i].counters);
-		cpu_msrs[i].counters = NULL;
-		kfree(cpu_msrs[i].controls);
-		cpu_msrs[i].controls = NULL;
+		kfree(per_cpu(cpu_msrs, i).counters);
+		per_cpu(cpu_msrs, i).counters = NULL;
+		kfree(per_cpu(cpu_msrs, i).controls);
+		per_cpu(cpu_msrs, i).controls = NULL;
 	}
 }
 
@@ -149,13 +149,15 @@ static int allocate_msrs(void)
 
 	int i;
 	for_each_possible_cpu(i) {
-		cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
-		if (!cpu_msrs[i].counters) {
+		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
+								GFP_KERNEL);
+		if (!per_cpu(cpu_msrs, i).counters) {
 			success = 0;
 			break;
 		}
-		cpu_msrs[i].controls = kmalloc(controls_size, GFP_KERNEL);
-		if (!cpu_msrs[i].controls) {
+		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
+								GFP_KERNEL);
+		if (!per_cpu(cpu_msrs, i).controls) {
 			success = 0;
 			break;
 		}
@@ -170,11 +172,11 @@ static int allocate_msrs(void)
 static void nmi_cpu_setup(void *dummy)
 {
 	int cpu = smp_processor_id();
-	struct op_msrs *msrs = &cpu_msrs[cpu];
+	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
 	spin_lock(&oprofilefs_lock);
 	model->setup_ctrs(msrs);
 	spin_unlock(&oprofilefs_lock);
-	saved_lvtpc[cpu] = apic_read(APIC_LVTPC);
+	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
@@ -203,13 +205,15 @@ static int nmi_setup(void)
 	 */
 
 	/* Assume saved/restored counters are the same on all CPUs */
-	model->fill_in_addresses(&cpu_msrs[0]);
+	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
 	for_each_possible_cpu(cpu) {
 		if (cpu != 0) {
-			memcpy(cpu_msrs[cpu].counters, cpu_msrs[0].counters,
+			memcpy(per_cpu(cpu_msrs, cpu).counters,
+			       per_cpu(cpu_msrs, 0).counters,
 			       sizeof(struct op_msr) * model->num_counters);
 
-			memcpy(cpu_msrs[cpu].controls, cpu_msrs[0].controls,
+			memcpy(per_cpu(cpu_msrs, cpu).controls,
+			       per_cpu(cpu_msrs, 0).controls,
 			       sizeof(struct op_msr) * model->num_controls);
 		}
 
@@ -249,7 +253,7 @@ static void nmi_cpu_shutdown(void *dummy)
 {
 	unsigned int v;
 	int cpu = smp_processor_id();
-	struct op_msrs *msrs = &cpu_msrs[cpu];
+	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
 
 	/* restoring APIC_LVTPC can trigger an apic error because the delivery
 	 * mode and vector nr combination can be illegal. That's by design: on
@@ -258,23 +262,24 @@ static void nmi_cpu_shutdown(void *dummy)
 	 */
 	v = apic_read(APIC_LVTERR);
 	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
-	apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
+	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
 	apic_write(APIC_LVTERR, v);
 	nmi_restore_registers(msrs);
 }
 
 static void nmi_shutdown(void)
 {
+	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
 	nmi_enabled = 0;
 	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
 	unregister_die_notifier(&profile_exceptions_nb);
-	model->shutdown(cpu_msrs);
+	model->shutdown(msrs);
 	free_msrs();
 }
 
 static void nmi_cpu_start(void *dummy)
 {
-	struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
+	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
 	model->start(msrs);
 }
 
@@ -286,7 +291,7 @@ static int nmi_start(void)
 
 static void nmi_cpu_stop(void *dummy)
 {
-	struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
+	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
 	model->stop(msrs);
 }
 
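The nmi_int.c conversion is the merge's third recurring pattern: NR_CPUS-sized arrays become per-CPU variables, indexed with per_cpu(var, cpu) from anywhere and __get_cpu_var(var) on the local CPU (the callers here run in IPI or NMI context, so they cannot migrate mid-access). A minimal sketch of the accessors, with a hypothetical counter:

/* Hypothetical per-CPU counter showing the converted access patterns. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, hits);	/* was: unsigned long hits[NR_CPUS]; */

static void bump_local_hits(void)		/* called with preemption off */
{
	__get_cpu_var(hits)++;			/* this CPU's instance */
}

static unsigned long read_hits(int cpu)
{
	return per_cpu(hits, cpu);		/* any CPU's instance, by id */
}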