 arch/x86/kernel/acpi/cstate.c                     |   3
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c        |  10
 arch/x86/kernel/cpu/cpufreq/powernow-k8.c         |  15
 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c  |  12
 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c       |   3
 arch/x86/kernel/cpu/intel_cacheinfo.c             |   3
 arch/x86/kernel/ldt.c                             |   6
 arch/x86/kernel/microcode.c                       |  17
 arch/x86/kernel/reboot.c                          |  11
 arch/x86/kernel/setup_percpu.c                    |  10
 drivers/acpi/processor_throttling.c               |  11
 drivers/firmware/dcdbas.c                         |   3
 drivers/misc/sgi-xp/xpc_main.c                    |   3
 include/linux/cpumask.h                           |  41
 kernel/cpu.c                                      | 113
 kernel/stop_machine.c                             |   3
 kernel/time/tick-common.c                         |   8
 kernel/trace/trace_sysprof.c                      |   4
 lib/smp_processor_id.c                            |   5
 net/sunrpc/svc.c                                  |   3
 20 files changed, 159 insertions(+), 125 deletions(-)
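
This commit removes the cpumask_of_cpu_ptr*() macro family and converts every caller to take the address of cpumask_of_cpu(cpu) directly, now that it indexes a static const map. A minimal sketch of the conversion pattern repeated throughout the diffs below (context trimmed for illustration):

	/* Before: declare/advance a temporary pointer via the macro family. */
	cpumask_of_cpu_ptr_declare(mask);
	cpumask_of_cpu_ptr_next(mask, cpu);
	set_cpus_allowed_ptr(current, mask);

	/* After: cpumask_of_cpu(cpu) is an addressable lvalue in
	 * cpumask_of_cpu_map[], so no temporary pointer is needed. */
	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));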
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 9220cf46aa10..c2502eb9aa83 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -73,7 +73,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr(new_mask, cpu);
 	int retval;
 	unsigned int eax, ebx, ecx, edx;
 	unsigned int edx_part;
@@ -92,7 +91,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 
 	/* Make sure we are running on right CPU */
 	saved_mask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, new_mask);
+	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (retval)
 		return -1;
 
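
Nearly every converted call site follows the same affinity idiom: save current->cpus_allowed, pin the task to one CPU, do the per-CPU work, then restore the saved mask. A condensed sketch of that idiom (run_on_cpu() is a hypothetical helper, not part of this commit):

	/* Hypothetical helper illustrating the save/pin/restore idiom. */
	static int run_on_cpu(unsigned int cpu, int (*fn)(void *data), void *data)
	{
		cpumask_t saved_mask = current->cpus_allowed;
		int ret;

		ret = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
		if (ret)
			return ret;
		ret = fn(data);		/* now guaranteed to run on 'cpu' */
		set_cpus_allowed_ptr(current, &saved_mask);
		return ret;
	}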
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ff2fff56f0a8..dd097b835839 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -200,12 +200,10 @@ static void drv_read(struct drv_cmd *cmd)
 static void drv_write(struct drv_cmd *cmd)
 {
 	cpumask_t saved_mask = current->cpus_allowed;
-	cpumask_of_cpu_ptr_declare(cpu_mask);
 	unsigned int i;
 
 	for_each_cpu_mask_nr(i, cmd->mask) {
-		cpumask_of_cpu_ptr_next(cpu_mask, i);
-		set_cpus_allowed_ptr(current, cpu_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 		do_drv_write(cmd);
 	}
 
@@ -269,12 +267,11 @@ static unsigned int get_measured_perf(unsigned int cpu)
 	} aperf_cur, mperf_cur;
 
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	unsigned int perf_percent;
 	unsigned int retval;
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, cpu_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (get_cpu() != cpu) {
 		/* We were not able to run on requested processor */
 		put_cpu();
@@ -340,7 +337,6 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
 	unsigned int freq;
 	unsigned int cached_freq;
@@ -353,7 +349,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	}
 
 	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(cpu_mask), data);
+	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 53c7b6936973..c45ca6d4dce1 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -479,12 +479,11 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid)
 static int check_supported_cpu(unsigned int cpu)
 {
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	u32 eax, ebx, ecx, edx;
 	unsigned int rc = 0;
 
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, cpu_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -1017,7 +1016,6 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned int index)
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;
@@ -1032,7 +1030,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, cpu_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1107,7 +1105,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr_declare(newmask);
 	int rc;
 
 	if (!cpu_online(pol->cpu))
@@ -1159,8 +1156,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	cpumask_of_cpu_ptr_next(newmask, pol->cpu);
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1182,7 +1178,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	set_cpus_allowed_ptr(current, &oldmask);
 
 	if (cpu_family == CPU_HW_PSTATE)
-		pol->cpus = *newmask;
+		pol->cpus = cpumask_of_cpu(pol->cpu);
 	else
 		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
 	data->available_cores = &(pol->cpus);
@@ -1248,7 +1244,6 @@ static unsigned int powernowk8_get (unsigned int cpu)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask = current->cpus_allowed;
-	cpumask_of_cpu_ptr(newmask, cpu);
 	unsigned int khz = 0;
 	unsigned int first;
 
@@ -1258,7 +1253,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	if (!data)
 		return -EINVAL;
 
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX
 			"limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index ca2ac13b7af2..15e13c01cc36 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -324,10 +324,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
 	unsigned l, h;
 	unsigned clock_freq;
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr(new_mask, cpu);
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, new_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu)
 		return 0;
 
@@ -585,15 +584,12 @@ static int centrino_target (struct cpufreq_policy *policy,
 	 * Best effort undo..
 	 */
 
-	if (!cpus_empty(*covered_cpus)) {
-		cpumask_of_cpu_ptr_declare(new_mask);
-
+	if (!cpus_empty(*covered_cpus))
 		for_each_cpu_mask_nr(j, *covered_cpus) {
-			cpumask_of_cpu_ptr_next(new_mask, j);
-			set_cpus_allowed_ptr(current, new_mask);
+			set_cpus_allowed_ptr(current,
+					     &cpumask_of_cpu(j));
 			wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
 		}
-	}
 
 	tmp = freqs.new;
 	freqs.new = freqs.old;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 2f3728dc24f6..191f7263c61d 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -244,8 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	cpumask_of_cpu_ptr(newmask, cpu);
-	return _speedstep_get(newmask);
+	return _speedstep_get(&cpumask_of_cpu(cpu));
 }
 
 /**
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 650d40f7912b..6b0a10b002f1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -516,7 +516,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	unsigned long j;
 	int retval;
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr(newmask, cpu);
 
 	if (num_cache_leaves == 0)
 		return -ENOENT;
@@ -527,7 +526,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		return -ENOMEM;
 
 	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, newmask);
+	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (retval)
 		goto out;
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 3fee2aa50f3f..b68e21f06f4f 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -62,12 +62,10 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 
 	if (reload) {
 #ifdef CONFIG_SMP
-		cpumask_of_cpu_ptr_declare(mask);
-
 		preempt_disable();
 		load_LDT(pc);
-		cpumask_of_cpu_ptr_next(mask, smp_processor_id());
-		if (!cpus_equal(current->mm->cpu_vm_mask, *mask))
+		if (!cpus_equal(current->mm->cpu_vm_mask,
+				cpumask_of_cpu(smp_processor_id())))
 			smp_call_function(flush_ldt, current->mm, 1);
 		preempt_enable();
 #else
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 6994c751590e..652fa5c38ebe 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -388,7 +388,6 @@ static int do_microcode_update (void)
 	void *new_mc = NULL;
 	int cpu;
 	cpumask_t old;
-	cpumask_of_cpu_ptr_declare(newmask);
 
 	old = current->cpus_allowed;
 
@@ -405,8 +404,7 @@ static int do_microcode_update (void)
 
 		if (!uci->valid)
 			continue;
-		cpumask_of_cpu_ptr_next(newmask, cpu);
-		set_cpus_allowed_ptr(current, newmask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		error = get_maching_microcode(new_mc, cpu);
 		if (error < 0)
 			goto out;
@@ -576,7 +574,6 @@ static int apply_microcode_check_cpu(int cpu)
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	cpumask_t old;
-	cpumask_of_cpu_ptr(newmask, cpu);
 	unsigned int val[2];
 	int err = 0;
 
@@ -585,7 +582,7 @@ static int apply_microcode_check_cpu(int cpu)
 		return 0;
 
 	old = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
 	/* Check if the microcode we have in memory matches the CPU */
 	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -623,12 +620,11 @@ static int apply_microcode_check_cpu(int cpu)
 static void microcode_init_cpu(int cpu, int resume)
 {
 	cpumask_t old;
-	cpumask_of_cpu_ptr(newmask, cpu);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
 	old = current->cpus_allowed;
 
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	mutex_lock(&microcode_mutex);
 	collect_cpu_info(cpu);
 	if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
@@ -661,13 +657,10 @@ static ssize_t reload_store(struct sys_device *dev,
 	if (end == buf)
 		return -EINVAL;
 	if (val == 1) {
-		cpumask_t old;
-		cpumask_of_cpu_ptr(newmask, cpu);
-
-		old = current->cpus_allowed;
+		cpumask_t old = current->cpus_allowed;
 
 		get_online_cpus();
-		set_cpus_allowed_ptr(current, newmask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
 		mutex_lock(&microcode_mutex);
 		if (uci->valid)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 06a9f643817e..724adfc63cb9 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -414,25 +414,20 @@ void native_machine_shutdown(void)
 
 	/* The boot cpu is always logical cpu 0 */
 	int reboot_cpu_id = 0;
-	cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
 
 #ifdef CONFIG_X86_32
 	/* See if there has been given a command line override */
 	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-		cpu_online(reboot_cpu)) {
+		cpu_online(reboot_cpu))
 		reboot_cpu_id = reboot_cpu;
-		cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-	}
 #endif
 
 	/* Make certain the cpu I'm about to reboot on is online */
-	if (!cpu_online(reboot_cpu_id)) {
+	if (!cpu_online(reboot_cpu_id))
 		reboot_cpu_id = smp_processor_id();
-		cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-	}
 
 	/* Make certain I only run on the appropriate processor */
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
 
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index f7745f94c006..1cd53dfcd309 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -81,10 +81,12 @@ static void __init setup_per_cpu_maps(void)
 }
 
 #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-cpumask_t *cpumask_of_cpu_map __read_mostly;
-EXPORT_SYMBOL(cpumask_of_cpu_map);
-
-/* requires nr_cpu_ids to be initialized */
+/*
+ * Replace static cpumask_of_cpu_map in the initdata section,
+ * with one that's allocated sized by the possible number of cpus.
+ *
+ * (requires nr_cpu_ids to be initialized)
+ */
 static void __init setup_cpumask_of_cpu(void)
 {
 	int i;
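
The body of setup_cpumask_of_cpu() is not part of this hunk; per the new comment it swaps the static __initdata table for one sized by nr_cpu_ids. A sketch under that assumption (the allocation details are assumptions, not shown in this diff):

	static void __init setup_cpumask_of_cpu(void)
	{
		int i;
		cpumask_t *map;

		/* Assumed: allocate nr_cpu_ids masks, set bit i in entry i,
		 * then point cpumask_of_cpu_map at the new table. */
		map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
		for (i = 0; i < nr_cpu_ids; i++)
			cpu_set(i, map[i]);
		cpumask_of_cpu_map = map;
	}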
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index a2c3f9cfa549..a56fc6c4394b 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -827,7 +827,6 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr_declare(new_mask);
 	int ret;
 
 	if (!pr)
@@ -839,8 +838,7 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 	 * Migrate task to the cpu pointed by pr.
 	 */
 	saved_mask = current->cpus_allowed;
-	cpumask_of_cpu_ptr_next(new_mask, pr->id);
-	set_cpus_allowed_ptr(current, new_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
 	ret = pr->throttling.acpi_processor_get_throttling(pr);
 	/* restore the previous state */
 	set_cpus_allowed_ptr(current, &saved_mask);
@@ -989,7 +987,6 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr_declare(new_mask);
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
@@ -1028,8 +1025,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		cpumask_of_cpu_ptr_next(new_mask, pr->id);
-		set_cpus_allowed_ptr(current, new_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
 		ret = p_throttling->acpi_processor_set_throttling(pr,
 						t_state.target_state);
 	} else {
@@ -1060,8 +1056,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 			continue;
 		}
 		t_state.cpu = i;
-		cpumask_of_cpu_ptr_next(new_mask, i);
-		set_cpus_allowed_ptr(current, new_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 		ret = match_pr->throttling.
 			acpi_processor_set_throttling(
 			match_pr, t_state.target_state);
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index c66817e7717b..50a071f1c945 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -245,7 +245,6 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
 static int smi_request(struct smi_cmd *smi_cmd)
 {
 	cpumask_t old_mask;
-	cpumask_of_cpu_ptr(new_mask, 0);
 	int ret = 0;
 
 	if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -256,7 +255,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
 
 	/* SMI requires CPU 0 */
 	old_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, new_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
 	if (smp_processor_id() != 0) {
 		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
 			__func__);
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 579b01ff82d4..c3b4227f48a5 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -229,11 +229,10 @@ xpc_hb_checker(void *ignore)
 	int last_IRQ_count = 0;
 	int new_IRQ_count;
 	int force_IRQ = 0;
-	cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
 
 	/* this thread was marked active by xpc_hb_init() */
 
-	set_cpus_allowed_ptr(current, cpumask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
 
 	/* set our heartbeating to other partitions into motion */
 	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1b5c98e7fef7..8fa3b6d4a320 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -62,15 +62,7 @@
  * int next_cpu_nr(cpu, mask)		Next cpu past 'cpu', or nr_cpu_ids
  *
  * cpumask_t cpumask_of_cpu(cpu)	Return cpumask with bit 'cpu' set
- *ifdef CONFIG_HAS_CPUMASK_OF_CPU
- * cpumask_of_cpu_ptr_declare(v)	Declares cpumask_t *v
- * cpumask_of_cpu_ptr_next(v, cpu)	Sets v = &cpumask_of_cpu_map[cpu]
- * cpumask_of_cpu_ptr(v, cpu)		Combines above two operations
- *else
- * cpumask_of_cpu_ptr_declare(v)	Declares cpumask_t _v and *v = &_v
- * cpumask_of_cpu_ptr_next(v, cpu)	Sets _v = cpumask_of_cpu(cpu)
- * cpumask_of_cpu_ptr(v, cpu)		Combines above two operations
- *endif
+ *					(can be used as an lvalue)
  * CPU_MASK_ALL				Initializer - all bits set
  * CPU_MASK_NONE			Initializer - no bits set
  * unsigned long *cpus_addr(mask)	Array of unsigned long's in mask
@@ -274,36 +266,9 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
 }
 
 
-#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-extern cpumask_t *cpumask_of_cpu_map;
-#define cpumask_of_cpu(cpu)	(cpumask_of_cpu_map[cpu])
-#define	cpumask_of_cpu_ptr(v, cpu)					\
-		const cpumask_t *v = &cpumask_of_cpu(cpu)
-#define	cpumask_of_cpu_ptr_declare(v)					\
-		const cpumask_t *v
-#define cpumask_of_cpu_ptr_next(v, cpu)					\
-		v = &cpumask_of_cpu(cpu)
-#else
-#define cpumask_of_cpu(cpu)						\
-({									\
-	typeof(_unused_cpumask_arg_) m;					\
-	if (sizeof(m) == sizeof(unsigned long)) {			\
-		m.bits[0] = 1UL<<(cpu);					\
-	} else {							\
-		cpus_clear(m);						\
-		cpu_set((cpu), m);					\
-	}								\
-	m;								\
-})
-#define	cpumask_of_cpu_ptr(v, cpu)					\
-		cpumask_t _##v = cpumask_of_cpu(cpu);			\
-		const cpumask_t *v = &_##v
-#define	cpumask_of_cpu_ptr_declare(v)					\
-		cpumask_t _##v;						\
-		const cpumask_t *v = &_##v
-#define cpumask_of_cpu_ptr_next(v, cpu)					\
-		_##v = cpumask_of_cpu(cpu)
-#endif
+/* cpumask_of_cpu_map[] is in kernel/cpu.c */
+extern const cpumask_t *cpumask_of_cpu_map;
+#define cpumask_of_cpu(cpu)	(cpumask_of_cpu_map[cpu])
 
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
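
With the table-backed definition, cpumask_of_cpu(cpu) names a const object with a stable address, which is what lets callers write &cpumask_of_cpu(cpu); the old fallback built the mask in a statement-expression temporary whose address could not be taken. For example (illustrative usage, not from this commit):

	const cpumask_t *mask = &cpumask_of_cpu(3);	/* points at cpumask_of_cpu_map[3] */
	BUG_ON(!cpu_isset(3, *mask));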
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 10ba5f1004a5..a35d8995dc8c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -461,3 +461,116 @@ out:
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 #endif /* CONFIG_SMP */
+
+/* 64 bits of zeros, for initializers. */
+#if BITS_PER_LONG == 32
+#define Z64 0, 0
+#else
+#define Z64 0
+#endif
+
+/* Initializer macros. */
+#define CMI0(n) { .bits = { 1UL << (n) } }
+#define CMI(n, ...) { .bits = { __VA_ARGS__, 1UL << ((n) % BITS_PER_LONG) } }
+
+#define CMI8(n, ...)						\
+	CMI((n), __VA_ARGS__), CMI((n)+1, __VA_ARGS__),		\
+	CMI((n)+2, __VA_ARGS__), CMI((n)+3, __VA_ARGS__),	\
+	CMI((n)+4, __VA_ARGS__), CMI((n)+5, __VA_ARGS__),	\
+	CMI((n)+6, __VA_ARGS__), CMI((n)+7, __VA_ARGS__)
+
+#if BITS_PER_LONG == 32
+#define CMI64(n, ...)						\
+	CMI8((n), __VA_ARGS__), CMI8((n)+8, __VA_ARGS__),	\
+	CMI8((n)+16, __VA_ARGS__), CMI8((n)+24, __VA_ARGS__),	\
+	CMI8((n)+32, 0, __VA_ARGS__), CMI8((n)+40, 0, __VA_ARGS__), \
+	CMI8((n)+48, 0, __VA_ARGS__), CMI8((n)+56, 0, __VA_ARGS__)
+#else
+#define CMI64(n, ...)						\
+	CMI8((n), __VA_ARGS__), CMI8((n)+8, __VA_ARGS__),	\
+	CMI8((n)+16, __VA_ARGS__), CMI8((n)+24, __VA_ARGS__),	\
+	CMI8((n)+32, __VA_ARGS__), CMI8((n)+40, __VA_ARGS__),	\
+	CMI8((n)+48, __VA_ARGS__), CMI8((n)+56, __VA_ARGS__)
+#endif
+
+#define CMI256(n, ...)						\
+	CMI64((n), __VA_ARGS__), CMI64((n)+64, Z64, __VA_ARGS__), \
+	CMI64((n)+128, Z64, Z64, __VA_ARGS__),			\
+	CMI64((n)+192, Z64, Z64, Z64, __VA_ARGS__)
+#define Z256 Z64, Z64, Z64, Z64
+
+#define CMI1024(n, ...)					\
+	CMI256((n), __VA_ARGS__),			\
+	CMI256((n)+256, Z256, __VA_ARGS__),		\
+	CMI256((n)+512, Z256, Z256, __VA_ARGS__),	\
+	CMI256((n)+768, Z256, Z256, Z256, __VA_ARGS__)
+#define Z1024 Z256, Z256, Z256, Z256
+
+/* We want this statically initialized, just to be safe.  We try not
+ * to waste too much space, either. */
+static const cpumask_t cpumask_map[]
+#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
+__initdata
+#endif
+= {
+	CMI0(0), CMI0(1), CMI0(2), CMI0(3),
+#if NR_CPUS > 4
+	CMI0(4), CMI0(5), CMI0(6), CMI0(7),
+#endif
+#if NR_CPUS > 8
+	CMI0(8), CMI0(9), CMI0(10), CMI0(11),
+	CMI0(12), CMI0(13), CMI0(14), CMI0(15),
+#endif
+#if NR_CPUS > 16
+	CMI0(16), CMI0(17), CMI0(18), CMI0(19),
+	CMI0(20), CMI0(21), CMI0(22), CMI0(23),
+	CMI0(24), CMI0(25), CMI0(26), CMI0(27),
+	CMI0(28), CMI0(29), CMI0(30), CMI0(31),
+#endif
+#if NR_CPUS > 32
+#if BITS_PER_LONG == 32
+	CMI(32, 0), CMI(33, 0), CMI(34, 0), CMI(35, 0),
+	CMI(36, 0), CMI(37, 0), CMI(38, 0), CMI(39, 0),
+	CMI(40, 0), CMI(41, 0), CMI(42, 0), CMI(43, 0),
+	CMI(44, 0), CMI(45, 0), CMI(46, 0), CMI(47, 0),
+	CMI(48, 0), CMI(49, 0), CMI(50, 0), CMI(51, 0),
+	CMI(52, 0), CMI(53, 0), CMI(54, 0), CMI(55, 0),
+	CMI(56, 0), CMI(57, 0), CMI(58, 0), CMI(59, 0),
+	CMI(60, 0), CMI(61, 0), CMI(62, 0), CMI(63, 0),
+#else
+	CMI0(32), CMI0(33), CMI0(34), CMI0(35),
+	CMI0(36), CMI0(37), CMI0(38), CMI0(39),
+	CMI0(40), CMI0(41), CMI0(42), CMI0(43),
+	CMI0(44), CMI0(45), CMI0(46), CMI0(47),
+	CMI0(48), CMI0(49), CMI0(50), CMI0(51),
+	CMI0(52), CMI0(53), CMI0(54), CMI0(55),
+	CMI0(56), CMI0(57), CMI0(58), CMI0(59),
+	CMI0(60), CMI0(61), CMI0(62), CMI0(63),
+#endif /* BITS_PER_LONG == 64 */
+#endif
+#if NR_CPUS > 64
+	CMI64(64, Z64),
+#endif
+#if NR_CPUS > 128
+	CMI64(128, Z64, Z64), CMI64(192, Z64, Z64, Z64),
+#endif
+#if NR_CPUS > 256
+	CMI256(256, Z256),
+#endif
+#if NR_CPUS > 512
+	CMI256(512, Z256, Z256), CMI256(768, Z256, Z256, Z256),
+#endif
+#if NR_CPUS > 1024
+	CMI1024(1024, Z1024),
+#endif
+#if NR_CPUS > 2048
+	CMI1024(2048, Z1024, Z1024), CMI1024(3072, Z1024, Z1024, Z1024),
+#endif
+#if NR_CPUS > 4096
+#error NR_CPUS too big. Fix initializers or set CONFIG_HAVE_CPUMASK_OF_CPU_MAP
+#endif
+};
+
+const cpumask_t *cpumask_of_cpu_map = cpumask_map;
+
+EXPORT_SYMBOL_GPL(cpumask_of_cpu_map);
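
The CMI* initializers above emit one cpumask_t per CPU at compile time: CMI0(n) suffices while bit n lands in the first long, and CMI(n, ...) prefixes zero words before setting bit n % BITS_PER_LONG in the last word. An illustrative expansion on a 64-bit build (BITS_PER_LONG == 64):

	/* CMI0(2)      => { .bits = { 1UL << 2 } }
	 *                 -> CPU 2: bit 2 of word 0
	 * CMI(65, Z64) => { .bits = { 0, 1UL << (65 % 64) } }
	 *                 -> CPU 65: bit 1 of word 1
	 */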
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 738b411ff2d3..ba9b2054ecbd 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -33,9 +33,8 @@ static int stopmachine(void *cpu)
 {
 	int irqs_disabled = 0;
 	int prepared = 0;
-	cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
 
-	set_cpus_allowed_ptr(current, cpumask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
 
 	/* Ack: we are alive */
 	smp_mb();	/* Theoretically the ack = 0 might not be on this CPU yet. */
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index bf43284d6855..80c4336f4188 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -196,12 +196,10 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	struct tick_device *td;
 	int cpu, ret = NOTIFY_OK;
 	unsigned long flags;
-	cpumask_of_cpu_ptr_declare(cpumask);
 
 	spin_lock_irqsave(&tick_device_lock, flags);
 
 	cpu = smp_processor_id();
-	cpumask_of_cpu_ptr_next(cpumask, cpu);
 	if (!cpu_isset(cpu, newdev->cpumask))
 		goto out_bc;
 
@@ -209,7 +207,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	curdev = td->evtdev;
 
 	/* cpu local device ? */
-	if (!cpus_equal(newdev->cpumask, *cpumask)) {
+	if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {
 
 		/*
 		 * If the cpu affinity of the device interrupt can not
@@ -222,7 +220,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 		 * If we have a cpu local device already, do not replace it
 		 * by a non cpu local device
 		 */
-		if (curdev && cpus_equal(curdev->cpumask, *cpumask))
+		if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
 			goto out_bc;
 	}
 
@@ -254,7 +252,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 		curdev = NULL;
 	}
 	clockevents_exchange_device(curdev, newdev);
-	tick_setup_device(td, newdev, cpu, cpumask);
+	tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
 	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
 		tick_oneshot_notify();
 
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index ce2d723c10e1..bb948e52ce20 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -213,9 +213,7 @@ static void start_stack_timers(void)
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		cpumask_of_cpu_ptr(new_mask, cpu);
-
-		set_cpus_allowed_ptr(current, new_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		start_stack_timer(cpu);
 	}
 	set_cpus_allowed_ptr(current, &saved_mask);
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index c4381d9516f6..0f8fc22ed103 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -11,7 +11,6 @@ notrace unsigned int debug_smp_processor_id(void)
 {
 	unsigned long preempt_count = preempt_count();
 	int this_cpu = raw_smp_processor_id();
-	cpumask_of_cpu_ptr_declare(this_mask);
 
 	if (likely(preempt_count))
 		goto out;
@@ -23,9 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	cpumask_of_cpu_ptr_next(this_mask, this_cpu);
-
-	if (cpus_equal(current->cpus_allowed, *this_mask))
+	if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
 		goto out;
 
 	/*
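
The cpus_equal() check above lets a task that can only ever run on one CPU use smp_processor_id() without disabling preemption, since it cannot migrate. A kernel thread pinned with kthread_bind() satisfies it (my_thread_fn and the CPU number are illustrative):

	/* kthread_bind(t, cpu) sets t's cpus_allowed to cpumask_of_cpu(cpu),
	 * so debug_smp_processor_id() passes the cpus_equal() test. */
	struct task_struct *t = kthread_create(my_thread_fn, NULL, "pinned/3");
	kthread_bind(t, 3);
	wake_up_process(t);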
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 835d27413083..5a32cb7c4bb4 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -310,8 +310,7 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 	switch (m->mode) {
 	case SVC_POOL_PERCPU:
 	{
-		cpumask_of_cpu_ptr(cpumask, node);
-		set_cpus_allowed_ptr(task, cpumask);
+		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
 		break;
 	}
 	case SVC_POOL_PERNODE: