 arch/x86/kernel/acpi/cstate.c                     |   3
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c        |  16
 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c         |   6
 arch/x86/kernel/cpu/cpufreq/powernow-k8.c         |  23
 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c  | 157
 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c       |   7
 arch/x86/kernel/cpu/intel_cacheinfo.c             |   5
 arch/x86/kernel/cpu/mcheck/mce_64.c               |   2
 arch/x86/kernel/cpu/mcheck/mce_amd_64.c           |   4
 arch/x86/kernel/cpu/proc.c                        |   2
 arch/x86/kernel/genapic_flat_64.c                 |   2
 arch/x86/kernel/genx2apic_uv_x.c                  |   4
 arch/x86/kernel/io_apic_64.c                      |  12
 arch/x86/kernel/ldt.c                             |   6
 arch/x86/kernel/microcode.c                       |  13
 arch/x86/kernel/reboot.c                          |  14
 arch/x86/kernel/smpboot.c                         |   8
 arch/x86/xen/smp.c                                |   4
 drivers/acpi/processor_throttling.c               |  17
 drivers/base/cpu.c                                |   4
 drivers/cpufreq/cpufreq.c                         |  14
 drivers/cpufreq/cpufreq_conservative.c            |   2
 drivers/cpufreq/cpufreq_ondemand.c                |   4
 drivers/cpufreq/cpufreq_userspace.c               |  79
 drivers/firmware/dcdbas.c                         |   3
 drivers/infiniband/hw/ehca/ehca_irq.c             |   4
 drivers/misc/sgi-xp/xpc_main.c                    |   3
 include/asm-x86/ipi.h                             |   2
 include/asm-x86/processor.h                       |   2
 include/linux/cpumask.h                           | 166
 kernel/cpu.c                                      |   2
 kernel/rcuclassic.c                               |   2
 kernel/rcupreempt.c                               |  10
 kernel/sched.c                                    |  36
 kernel/sched_fair.c                               |   2
 kernel/sched_rt.c                                 |   4
 kernel/stop_machine.c                             |   3
 kernel/taskstats.c                                |   4
 kernel/time/clocksource.c                         |   4
 kernel/time/tick-broadcast.c                      |   3
 kernel/time/tick-common.c                         |  14
 kernel/trace/trace_sysprof.c                      |   4
 kernel/workqueue.c                                |   6
 lib/cpumask.c                                     |   9
 lib/smp_processor_id.c                            |   6
 mm/allocpercpu.c                                  |   4
 mm/vmstat.c                                       |   2
 net/core/dev.c                                    |   4
 net/iucv/iucv.c                                   |   2
 net/sunrpc/svc.c                                  |   3
 50 files changed, 442 insertions(+), 270 deletions(-)
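
Every hunk below applies the same two conversions: call sites stop materializing a full cpumask_t on the kernel stack through &cpumask_of_cpu(cpu) and instead pass a pointer obtained from the cpumask_of_cpu_ptr*() helpers, and iteration switches from for_each_cpu_mask() to for_each_cpu_mask_nr(), which stops at nr_cpu_ids instead of scanning all NR_CPUS bits. A minimal sketch of the converted call-site shape, assuming a hypothetical run_on_cpu() that is not part of this patch:

	/* Illustration only: run_on_cpu() is made up; the helpers are the ones
	 * this series introduces. */
	static int run_on_cpu(unsigned int cpu)
	{
		cpumask_t saved_mask = current->cpus_allowed;
		cpumask_of_cpu_ptr(new_mask, cpu);	/* pointer to cpu's mask, no stack copy */
		int retval;

		retval = set_cpus_allowed_ptr(current, new_mask); /* was: &cpumask_of_cpu(cpu) */
		if (retval)
			return retval;

		/* ... work that must execute on 'cpu' ... */

		set_cpus_allowed_ptr(current, &saved_mask);	/* restore the old affinity */
		return 0;
	}

When the pointer has to be rebound inside a loop, the declaration is hoisted with cpumask_of_cpu_ptr_declare() and each iteration calls cpumask_of_cpu_ptr_next(), as the drv_write() hunk in acpi-cpufreq.c below shows.
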
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index c2502eb9aa83..9220cf46aa10 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -73,6 +73,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	cpumask_t saved_mask;
+	cpumask_of_cpu_ptr(new_mask, cpu);
 	int retval;
 	unsigned int eax, ebx, ecx, edx;
 	unsigned int edx_part;
@@ -91,7 +92,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 
 	/* Make sure we are running on right CPU */
 	saved_mask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+	retval = set_cpus_allowed_ptr(current, new_mask);
 	if (retval)
 		return -1;
 
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index b0c8208df9fa..ff2fff56f0a8 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -200,10 +200,12 @@ static void drv_read(struct drv_cmd *cmd)
 static void drv_write(struct drv_cmd *cmd)
 {
 	cpumask_t saved_mask = current->cpus_allowed;
+	cpumask_of_cpu_ptr_declare(cpu_mask);
 	unsigned int i;
 
-	for_each_cpu_mask(i, cmd->mask) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+	for_each_cpu_mask_nr(i, cmd->mask) {
+		cpumask_of_cpu_ptr_next(cpu_mask, i);
+		set_cpus_allowed_ptr(current, cpu_mask);
 		do_drv_write(cmd);
 	}
 
@@ -267,11 +269,12 @@ static unsigned int get_measured_perf(unsigned int cpu)
 	} aperf_cur, mperf_cur;
 
 	cpumask_t saved_mask;
+	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	unsigned int perf_percent;
 	unsigned int retval;
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpu_mask);
 	if (get_cpu() != cpu) {
 		/* We were not able to run on requested processor */
 		put_cpu();
@@ -337,6 +340,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
+	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
 	unsigned int freq;
 	unsigned int cached_freq;
@@ -349,7 +353,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	}
 
 	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+	freq = extract_freq(get_cur_val(cpu_mask), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.
@@ -451,7 +455,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
-	for_each_cpu_mask(i, cmd.mask) {
+	for_each_cpu_mask_nr(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -466,7 +470,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		}
 	}
 
-	for_each_cpu_mask(i, cmd.mask) {
+	for_each_cpu_mask_nr(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 199e4e05e5dc..f1685fb91fbd 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 		return 0;
 
 	/* notifiers */
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 	/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
 	 * Developer's Manual, Volume 3
 	 */
-	for_each_cpu_mask(i, policy->cpus)
+	for_each_cpu_mask_nr(i, policy->cpus)
 		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
 	/* notifiers */
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 206791eb46e3..53c7b6936973 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -479,11 +479,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
 static int check_supported_cpu(unsigned int cpu)
 {
 	cpumask_t oldmask;
+	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	u32 eax, ebx, ecx, edx;
 	unsigned int rc = 0;
 
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpu_mask);
 
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -966,7 +967,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
 	freqs.old = find_khz_freq_from_fid(data->currfid);
 	freqs.new = find_khz_freq_from_fid(fid);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -974,7 +975,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
 	res = transition_fid_vid(data, fid, vid);
 	freqs.new = find_khz_freq_from_fid(data->currfid);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -997,7 +998,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 	freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
 	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -1005,7 +1006,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 	res = transition_pstate(data, pstate);
 	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -1016,6 +1017,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
 	cpumask_t oldmask;
+	cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;
@@ -1030,7 +1032,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+	set_cpus_allowed_ptr(current, cpu_mask);
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1105,6 +1107,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask;
+	cpumask_of_cpu_ptr_declare(newmask);
 	int rc;
 
 	if (!cpu_online(pol->cpu))
@@ -1156,7 +1159,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+	cpumask_of_cpu_ptr_next(newmask, pol->cpu);
+	set_cpus_allowed_ptr(current, newmask);
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1178,7 +1182,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	set_cpus_allowed_ptr(current, &oldmask);
 
 	if (cpu_family == CPU_HW_PSTATE)
-		pol->cpus = cpumask_of_cpu(pol->cpu);
+		pol->cpus = *newmask;
 	else
 		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
 	data->available_cores = &(pol->cpus);
@@ -1244,6 +1248,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask = current->cpus_allowed;
+	cpumask_of_cpu_ptr(newmask, cpu);
 	unsigned int khz = 0;
 	unsigned int first;
 
@@ -1253,7 +1258,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	if (!data)
 		return -EINVAL;
 
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, newmask);
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX
 		       "limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 908dd347c67e..ca2ac13b7af2 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -28,7 +28,8 @@
 #define PFX		"speedstep-centrino: "
 #define MAINTAINER	"cpufreq@lists.linux.org.uk"
 
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
+#define dprintk(msg...) \
+	cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
 
 #define INTEL_MSR_RANGE	(0xffff)
 
@@ -66,11 +67,12 @@ struct cpu_model
 
 	struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
 };
-static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x);
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+				  const struct cpu_id *x);
 
 /* Operating points for current CPU */
-static struct cpu_model *centrino_model[NR_CPUS];
-static const struct cpu_id *centrino_cpu[NR_CPUS];
+static DEFINE_PER_CPU(struct cpu_model *, centrino_model);
+static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu);
 
 static struct cpufreq_driver centrino_driver;
 
@@ -255,7 +257,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
 		return -ENOENT;
 	}
 
-	centrino_model[policy->cpu] = model;
+	per_cpu(centrino_model, policy->cpu) = model;
 
 	dprintk("found \"%s\": max frequency: %dkHz\n",
 	       model->model_name, model->max_freq);
@@ -264,10 +266,14 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
 }
 
 #else
-static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; }
+static inline int centrino_cpu_init_table(struct cpufreq_policy *policy)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
 
-static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x)
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+				  const struct cpu_id *x)
 {
 	if ((c->x86 == x->x86) &&
 	    (c->x86_model == x->x86_model) &&
@@ -286,23 +292,28 @@ static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
 	 * for centrino, as some DSDTs are buggy.
 	 * Ideally, this can be done using the acpi_data structure.
 	 */
-	if ((centrino_cpu[cpu] == &cpu_ids[CPU_BANIAS]) ||
-	    (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_A1]) ||
-	    (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_B0])) {
+	if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
+	    (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
+	    (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
 		msr = (msr >> 8) & 0xff;
 		return msr * 100000;
 	}
 
-	if ((!centrino_model[cpu]) || (!centrino_model[cpu]->op_points))
+	if ((!per_cpu(centrino_model, cpu)) ||
+	    (!per_cpu(centrino_model, cpu)->op_points))
 		return 0;
 
 	msr &= 0xffff;
-	for (i=0;centrino_model[cpu]->op_points[i].frequency != CPUFREQ_TABLE_END; i++) {
-		if (msr == centrino_model[cpu]->op_points[i].index)
-			return centrino_model[cpu]->op_points[i].frequency;
+	for (i = 0;
+	     per_cpu(centrino_model, cpu)->op_points[i].frequency
+						!= CPUFREQ_TABLE_END;
+	     i++) {
+		if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
+			return per_cpu(centrino_model, cpu)->
+				op_points[i].frequency;
 	}
 	if (failsafe)
-		return centrino_model[cpu]->op_points[i-1].frequency;
+		return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
 	else
 		return 0;
 }
@@ -313,9 +324,10 @@ static unsigned int get_cur_freq(unsigned int cpu)
 	unsigned l, h;
 	unsigned clock_freq;
 	cpumask_t saved_mask;
+	cpumask_of_cpu_ptr(new_mask, cpu);
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, new_mask);
 	if (smp_processor_id() != cpu)
 		return 0;
 
@@ -347,7 +359,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 	int i;
 
 	/* Only Intel makes Enhanced Speedstep-capable CPUs */
-	if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
+	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
+	    !cpu_has(cpu, X86_FEATURE_EST))
 		return -ENODEV;
 
 	if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
@@ -361,9 +374,9 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 			break;
 
 	if (i != N_IDS)
-		centrino_cpu[policy->cpu] = &cpu_ids[i];
+		per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
 
-	if (!centrino_cpu[policy->cpu]) {
+	if (!per_cpu(centrino_cpu, policy->cpu)) {
 		dprintk("found unsupported CPU with "
 			"Enhanced SpeedStep: send /proc/cpuinfo to "
 			MAINTAINER "\n");
@@ -386,23 +399,26 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 		/* check to see if it stuck */
 		rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 		if (!(l & (1<<16))) {
-			printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n");
+			printk(KERN_INFO PFX
+				"couldn't enable Enhanced SpeedStep\n");
 			return -ENODEV;
 		}
 	}
 
 	freq = get_cur_freq(policy->cpu);
-
-	policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */
+	policy->cpuinfo.transition_latency = 10000;
+	/* 10uS transition latency */
 	policy->cur = freq;
 
 	dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur);
 
-	ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model[policy->cpu]->op_points);
+	ret = cpufreq_frequency_table_cpuinfo(policy,
+		per_cpu(centrino_model, policy->cpu)->op_points);
 	if (ret)
 		return (ret);
 
-	cpufreq_frequency_table_get_attr(centrino_model[policy->cpu]->op_points, policy->cpu);
+	cpufreq_frequency_table_get_attr(
+		per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
 
 	return 0;
 }
@@ -411,12 +427,12 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
 {
 	unsigned int cpu = policy->cpu;
 
-	if (!centrino_model[cpu])
+	if (!per_cpu(centrino_model, cpu))
 		return -ENODEV;
 
 	cpufreq_frequency_table_put_attr(cpu);
 
-	centrino_model[cpu] = NULL;
+	per_cpu(centrino_model, cpu) = NULL;
 
 	return 0;
 }
@@ -430,17 +446,26 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
  */
 static int centrino_verify (struct cpufreq_policy *policy)
 {
-	return cpufreq_frequency_table_verify(policy, centrino_model[policy->cpu]->op_points);
+	return cpufreq_frequency_table_verify(policy,
+			per_cpu(centrino_model, policy->cpu)->op_points);
 }
 
 /**
  * centrino_setpolicy - set a new CPUFreq policy
  * @policy: new policy
  * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @relation: how that frequency relates to achieved frequency
+ *	(CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
  *
  * Sets a new CPUFreq policy.
  */
+struct allmasks {
+	cpumask_t		online_policy_cpus;
+	cpumask_t		saved_mask;
+	cpumask_t		set_mask;
+	cpumask_t		covered_cpus;
+};
+
 static int centrino_target (struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
@@ -448,48 +473,55 @@ static int centrino_target (struct cpufreq_policy *policy,
 	unsigned int	newstate = 0;
 	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
 	struct cpufreq_freqs	freqs;
-	cpumask_t		online_policy_cpus;
-	cpumask_t		saved_mask;
-	cpumask_t		set_mask;
-	cpumask_t		covered_cpus;
 	int			retval = 0;
 	unsigned int		j, k, first_cpu, tmp;
-
-	if (unlikely(centrino_model[cpu] == NULL))
-		return -ENODEV;
+	CPUMASK_ALLOC(allmasks);
+	CPUMASK_PTR(online_policy_cpus, allmasks);
+	CPUMASK_PTR(saved_mask, allmasks);
+	CPUMASK_PTR(set_mask, allmasks);
+	CPUMASK_PTR(covered_cpus, allmasks);
+
+	if (unlikely(allmasks == NULL))
+		return -ENOMEM;
+
+	if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
+		retval = -ENODEV;
+		goto out;
+	}
 
 	if (unlikely(cpufreq_frequency_table_target(policy,
-			centrino_model[cpu]->op_points,
+			per_cpu(centrino_model, cpu)->op_points,
 			target_freq,
 			relation,
 			&newstate))) {
-		return -EINVAL;
+		retval = -EINVAL;
+		goto out;
 	}
 
 #ifdef CONFIG_HOTPLUG_CPU
 	/* cpufreq holds the hotplug lock, so we are safe from here on */
-	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+	cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus);
 #else
-	online_policy_cpus = policy->cpus;
+	*online_policy_cpus = policy->cpus;
 #endif
 
-	saved_mask = current->cpus_allowed;
+	*saved_mask = current->cpus_allowed;
 	first_cpu = 1;
-	cpus_clear(covered_cpus);
-	for_each_cpu_mask(j, online_policy_cpus) {
+	cpus_clear(*covered_cpus);
+	for_each_cpu_mask_nr(j, *online_policy_cpus) {
 		/*
 		 * Support for SMP systems.
 		 * Make sure we are running on CPU that wants to change freq
 		 */
-		cpus_clear(set_mask);
+		cpus_clear(*set_mask);
 		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			cpus_or(set_mask, set_mask, online_policy_cpus);
+			cpus_or(*set_mask, *set_mask, *online_policy_cpus);
 		else
-			cpu_set(j, set_mask);
+			cpu_set(j, *set_mask);
 
-		set_cpus_allowed_ptr(current, &set_mask);
+		set_cpus_allowed_ptr(current, set_mask);
 		preempt_disable();
-		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+		if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) {
 			dprintk("couldn't limit to CPUs in this domain\n");
 			retval = -EAGAIN;
 			if (first_cpu) {
@@ -500,7 +532,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 			break;
 		}
 
-		msr = centrino_model[cpu]->op_points[newstate].index;
+		msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
 
 		if (first_cpu) {
 			rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
@@ -517,7 +549,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 			dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
 				target_freq, freqs.old, freqs.new, msr);
 
-			for_each_cpu_mask(k, online_policy_cpus) {
+			for_each_cpu_mask_nr(k, *online_policy_cpus) {
 				freqs.cpu = k;
 				cpufreq_notify_transition(&freqs,
 					CPUFREQ_PRECHANGE);
@@ -536,11 +568,11 @@ static int centrino_target (struct cpufreq_policy *policy,
 			break;
 		}
 
-		cpu_set(j, covered_cpus);
+		cpu_set(j, *covered_cpus);
 		preempt_enable();
 	}
 
-	for_each_cpu_mask(k, online_policy_cpus) {
+	for_each_cpu_mask_nr(k, *online_policy_cpus) {
 		freqs.cpu = k;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -553,10 +585,12 @@ static int centrino_target (struct cpufreq_policy *policy,
 	 * Best effort undo..
 	 */
 
-	if (!cpus_empty(covered_cpus)) {
-		for_each_cpu_mask(j, covered_cpus) {
-			set_cpus_allowed_ptr(current,
-					     &cpumask_of_cpu(j));
+	if (!cpus_empty(*covered_cpus)) {
+		cpumask_of_cpu_ptr_declare(new_mask);
+
+		for_each_cpu_mask_nr(j, *covered_cpus) {
+			cpumask_of_cpu_ptr_next(new_mask, j);
+			set_cpus_allowed_ptr(current, new_mask);
 			wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
 		}
 	}
@@ -564,19 +598,22 @@ static int centrino_target (struct cpufreq_policy *policy,
 		tmp = freqs.new;
 		freqs.new = freqs.old;
 		freqs.old = tmp;
-		for_each_cpu_mask(j, online_policy_cpus) {
+		for_each_cpu_mask_nr(j, *online_policy_cpus) {
 			freqs.cpu = j;
 			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 		}
 	}
-	set_cpus_allowed_ptr(current, &saved_mask);
-	return 0;
+	set_cpus_allowed_ptr(current, saved_mask);
+	retval = 0;
+	goto out;
 
 migrate_end:
 	preempt_enable();
-	set_cpus_allowed_ptr(current, &saved_mask);
-	return 0;
+	set_cpus_allowed_ptr(current, saved_mask);
+out:
+	CPUMASK_FREE(allmasks);
+	return retval;
 }
 
 static struct freq_attr* centrino_attr[] = {
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 1b50244b1fdf..2f3728dc24f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -244,7 +244,8 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	return _speedstep_get(&cpumask_of_cpu(cpu));
+	cpumask_of_cpu_ptr(newmask, cpu);
+	return _speedstep_get(newmask);
 }
 
 /**
@@ -279,7 +280,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 
 	cpus_allowed = current->cpus_allowed;
 
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -292,7 +293,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 	/* allow to be run on all CPUs */
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 2c8afafa18e8..e4b8d189d7ed 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -489,7 +489,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	int sibling;
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
+	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
 		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
 	}
@@ -516,6 +516,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	unsigned long j;
 	int retval;
 	cpumask_t oldmask;
+	cpumask_of_cpu_ptr(newmask, cpu);
 
 	if (num_cache_leaves == 0)
 		return -ENOENT;
@@ -526,7 +527,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		return -ENOMEM;
 
 	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+	retval = set_cpus_allowed_ptr(current, newmask);
 	if (retval)
 		goto out;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index c4a7ec31394c..2fe06ab5c547 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -580,7 +580,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 	char __user *buf = ubuf;
 	int i, err;
 
-	cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
+	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
 	if (!cpu_tsc)
 		return -ENOMEM;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 7c9a813e1193..88736cadbaa6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	if (err)
 		goto out_free;
 
-	for_each_cpu_mask(i, b->cpus) {
+	for_each_cpu_mask_nr(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
 	/* remove all sibling symlinks before unregistering */
-	for_each_cpu_mask(i, b->cpus) {
+	for_each_cpu_mask_nr(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 0d0d9057e7c0..a26c480b9491 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -160,7 +160,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
 {
 	if (*pos == 0)	/* just in case, cpu 0 is not the first */
 		*pos = first_cpu(cpu_online_map);
-	if ((*pos) < NR_CPUS && cpu_online(*pos))
+	if ((*pos) < nr_cpu_ids && cpu_online(*pos))
 		return &cpu_data(*pos);
 	return NULL;
 }
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 1a9c68845ee8..786548a62d38 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -168,7 +168,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
 	 * May as well be the first.
 	 */
 	cpu = first_cpu(cpumask);
-	if ((unsigned)cpu < NR_CPUS)
+	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
 		return BAD_APICID;
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 711f11c30b06..3e5d7b8698f9 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -94,7 +94,7 @@ static void uv_send_IPI_mask(cpumask_t mask, int vector)
 {
 	unsigned int cpu;
 
-	for (cpu = 0; cpu < NR_CPUS; ++cpu)
+	for_each_possible_cpu(cpu)
 		if (cpu_isset(cpu, mask))
 			uv_send_IPI_one(cpu, vector);
 }
@@ -128,7 +128,7 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
 	 * May as well be the first.
 	 */
 	cpu = first_cpu(cpumask);
-	if ((unsigned)cpu < NR_CPUS)
+	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
 		return BAD_APICID;
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 6510cde36b35..a85db790754b 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -731,7 +731,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
 		return 0;
 	}
 
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		cpumask_t domain, new_mask;
 		int new_cpu;
 		int vector, offset;
@@ -752,7 +752,7 @@ next:
 			continue;
 		if (vector == IA32_SYSCALL_VECTOR)
 			goto next;
-		for_each_cpu_mask(new_cpu, new_mask)
+		for_each_cpu_mask_nr(new_cpu, new_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
@@ -762,7 +762,7 @@ next:
 			cfg->move_in_progress = 1;
 			cfg->old_domain = cfg->domain;
 		}
-		for_each_cpu_mask(new_cpu, new_mask)
+		for_each_cpu_mask_nr(new_cpu, new_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
 		cfg->domain = domain;
@@ -794,7 +794,7 @@ static void __clear_irq_vector(int irq)
 
 	vector = cfg->vector;
 	cpus_and(mask, cfg->domain, cpu_online_map);
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu_mask_nr(cpu, mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
@@ -1372,12 +1372,10 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 static int ioapic_retrigger_irq(unsigned int irq)
 {
 	struct irq_cfg *cfg = &irq_cfg[irq];
-	cpumask_t mask;
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	mask = cpumask_of_cpu(first_cpu(cfg->domain));
-	send_IPI_mask(mask, cfg->vector);
+	send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
 	spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index a8449571858a..3fee2aa50f3f 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -62,12 +62,12 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 
 	if (reload) {
 #ifdef CONFIG_SMP
-		cpumask_t mask;
+		cpumask_of_cpu_ptr_declare(mask);
 
 		preempt_disable();
 		load_LDT(pc);
-		mask = cpumask_of_cpu(smp_processor_id());
-		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
+		cpumask_of_cpu_ptr_next(mask, smp_processor_id());
+		if (!cpus_equal(current->mm->cpu_vm_mask, *mask))
 			smp_call_function(flush_ldt, current->mm, 1);
 		preempt_enable();
 #else
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 56b933119a04..58520169e35d 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -388,6 +388,7 @@ static int do_microcode_update (void)
 	void *new_mc = NULL;
 	int cpu;
 	cpumask_t old;
+	cpumask_of_cpu_ptr_declare(newmask);
 
 	old = current->cpus_allowed;
 
@@ -404,7 +405,8 @@ static int do_microcode_update (void)
 
 		if (!uci->valid)
 			continue;
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		cpumask_of_cpu_ptr_next(newmask, cpu);
+		set_cpus_allowed_ptr(current, newmask);
 		error = get_maching_microcode(new_mc, cpu);
 		if (error < 0)
 			goto out;
@@ -574,6 +576,7 @@ static int apply_microcode_check_cpu(int cpu)
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	cpumask_t old;
+	cpumask_of_cpu_ptr(newmask, cpu);
 	unsigned int val[2];
 	int err = 0;
 
@@ -582,7 +585,7 @@ static int apply_microcode_check_cpu(int cpu)
 		return 0;
 
 	old = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, newmask);
 
 	/* Check if the microcode we have in memory matches the CPU */
 	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -620,11 +623,12 @@ static int apply_microcode_check_cpu(int cpu)
 static void microcode_init_cpu(int cpu, int resume)
 {
 	cpumask_t old;
+	cpumask_of_cpu_ptr(newmask, cpu);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
 	old = current->cpus_allowed;
 
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, newmask);
 	mutex_lock(&microcode_mutex);
 	collect_cpu_info(cpu);
 	if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
@@ -656,11 +660,12 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz)
 		return -EINVAL;
 	if (val == 1) {
 		cpumask_t old;
+		cpumask_of_cpu_ptr(newmask, cpu);
 
 		old = current->cpus_allowed;
 
 		get_online_cpus();
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		set_cpus_allowed_ptr(current, newmask);
 
 		mutex_lock(&microcode_mutex);
 		if (uci->valid)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f8a62160e151..214bbdfc851e 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -403,24 +403,28 @@ void native_machine_shutdown(void)
 {
 	/* Stop the cpus and apics */
 #ifdef CONFIG_SMP
-	int reboot_cpu_id;
 
 	/* The boot cpu is always logical cpu 0 */
-	reboot_cpu_id = 0;
+	int reboot_cpu_id = 0;
+	cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
 
 #ifdef CONFIG_X86_32
 	/* See if there has been given a command line override */
 	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-	    cpu_online(reboot_cpu))
+	    cpu_online(reboot_cpu)) {
 		reboot_cpu_id = reboot_cpu;
+		cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
+	}
 #endif
 
 	/* Make certain the cpu I'm about to reboot on is online */
-	if (!cpu_online(reboot_cpu_id))
+	if (!cpu_online(reboot_cpu_id)) {
 		reboot_cpu_id = smp_processor_id();
+		cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
+	}
 
 	/* Make certain I only run on the appropriate processor */
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
+	set_cpus_allowed_ptr(current, newmask);
 
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 687376ab07e8..09b98cd6332c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -438,7 +438,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	cpu_set(cpu, cpu_sibling_setup_map);
 
 	if (smp_num_siblings > 1) {
-		for_each_cpu_mask(i, cpu_sibling_setup_map) {
+		for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
 			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
 			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
 				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
@@ -461,7 +461,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		return;
 	}
 
-	for_each_cpu_mask(i, cpu_sibling_setup_map) {
+	for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
 			cpu_set(i, c->llc_shared_map);
@@ -1230,7 +1230,7 @@ static void remove_siblinginfo(int cpu)
 	int sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
 		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
 		/*/
 		 * last thread sibling in this cpu core going down
@@ -1239,7 +1239,7 @@ static void remove_siblinginfo(int cpu)
 			cpu_data(sibling).booted_cores--;
 	}
 
-	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
 		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
 	cpus_clear(per_cpu(cpu_sibling_map, cpu));
 	cpus_clear(per_cpu(cpu_core_map, cpu));
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 233156f39b7f..463adecc5cba 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -351,7 +351,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 
 	cpus_and(mask, mask, cpu_online_map);
 
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu_mask_nr(cpu, mask)
 		xen_send_IPI_one(cpu, vector);
 }
 
@@ -362,7 +362,7 @@ void xen_smp_send_call_function_ipi(cpumask_t mask)
 	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
 	/* Make sure other vcpus get a chance to run if they need to. */
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		if (xen_vcpu_stolen(cpu)) {
 			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 			break;
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0622ace05220..a2c3f9cfa549 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
 	cpumask_t saved_mask;
+	cpumask_of_cpu_ptr_declare(new_mask);
 	int ret;
 
 	if (!pr)
@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 	 * Migrate task to the cpu pointed by pr.
 	 */
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+	cpumask_of_cpu_ptr_next(new_mask, pr->id);
+	set_cpus_allowed_ptr(current, new_mask);
 	ret = pr->throttling.acpi_processor_get_throttling(pr);
 	/* restore the previous state */
 	set_cpus_allowed_ptr(current, &saved_mask);
@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
 	cpumask_t saved_mask;
+	cpumask_of_cpu_ptr_declare(new_mask);
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
@@ -1013,7 +1016,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * affected cpu in order to get one proper T-state.
 	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	for_each_cpu_mask(i, online_throttling_cpus) {
+	for_each_cpu_mask_nr(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
 						&t_state);
@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+		cpumask_of_cpu_ptr_next(new_mask, pr->id);
+		set_cpus_allowed_ptr(current, new_mask);
 		ret = p_throttling->acpi_processor_set_throttling(pr,
 				t_state.target_state);
 	} else {
@@ -1034,7 +1038,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 		 * it is necessary to set T-state for every affected
 		 * cpus.
 		 */
-		for_each_cpu_mask(i, online_throttling_cpus) {
+		for_each_cpu_mask_nr(i, online_throttling_cpus) {
 			match_pr = per_cpu(processors, i);
 			/*
 			 * If the pointer is invalid, we will report the
@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 				continue;
 			}
 			t_state.cpu = i;
-			set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+			cpumask_of_cpu_ptr_next(new_mask, i);
+			set_cpus_allowed_ptr(current, new_mask);
 			ret = match_pr->throttling.
 				acpi_processor_set_throttling(
 				match_pr, t_state.target_state);
@@ -1068,7 +1073,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * affected cpu to update the T-states.
 	 * The notifier event is THROTTLING_POSTCHANGE
 	 */
-	for_each_cpu_mask(i, online_throttling_cpus) {
+	for_each_cpu_mask_nr(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
 						&t_state);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e38dfed41d80..5000402ae092 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -119,14 +119,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
 {									\
 	return print_cpus_map(buf, &cpu_##type##_map);			\
 }									\
-struct sysdev_class_attribute attr_##type##_map =			\
+static struct sysdev_class_attribute attr_##type##_map =		\
 	_SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
 
 print_cpus_func(online);
 print_cpus_func(possible);
 print_cpus_func(present);
 
-struct sysdev_class_attribute *cpu_state_attr[] = {
+static struct sysdev_class_attribute *cpu_state_attr[] = {
 	&attr_online_map,
 	&attr_possible_map,
 	&attr_present_map,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1d41496ed2f8..d8f8b1e0edde 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
 	ssize_t i = 0;
 	unsigned int cpu;
 
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		if (i)
 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	}
 #endif
 
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		if (cpu == j)
 			continue;
 
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
898 } 898 }
899 899
900 spin_lock_irqsave(&cpufreq_driver_lock, flags); 900 spin_lock_irqsave(&cpufreq_driver_lock, flags);
901 for_each_cpu_mask(j, policy->cpus) { 901 for_each_cpu_mask_nr(j, policy->cpus) {
902 cpufreq_cpu_data[j] = policy; 902 cpufreq_cpu_data[j] = policy;
903 per_cpu(policy_cpu, j) = policy->cpu; 903 per_cpu(policy_cpu, j) = policy->cpu;
904 } 904 }
905 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 905 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
906 906
907 /* symlink affected CPUs */ 907 /* symlink affected CPUs */
908 for_each_cpu_mask(j, policy->cpus) { 908 for_each_cpu_mask_nr(j, policy->cpus) {
909 if (j == cpu) 909 if (j == cpu)
910 continue; 910 continue;
911 if (!cpu_online(j)) 911 if (!cpu_online(j))
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
945 945
946err_out_unregister: 946err_out_unregister:
947 spin_lock_irqsave(&cpufreq_driver_lock, flags); 947 spin_lock_irqsave(&cpufreq_driver_lock, flags);
948 for_each_cpu_mask(j, policy->cpus) 948 for_each_cpu_mask_nr(j, policy->cpus)
949 cpufreq_cpu_data[j] = NULL; 949 cpufreq_cpu_data[j] = NULL;
950 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 950 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
951 951
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1028 * links afterwards. 1028 * links afterwards.
1029 */ 1029 */
1030 if (unlikely(cpus_weight(data->cpus) > 1)) { 1030 if (unlikely(cpus_weight(data->cpus) > 1)) {
1031 for_each_cpu_mask(j, data->cpus) { 1031 for_each_cpu_mask_nr(j, data->cpus) {
1032 if (j == cpu) 1032 if (j == cpu)
1033 continue; 1033 continue;
1034 cpufreq_cpu_data[j] = NULL; 1034 cpufreq_cpu_data[j] = NULL;
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1038 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1038 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1039 1039
1040 if (unlikely(cpus_weight(data->cpus) > 1)) { 1040 if (unlikely(cpus_weight(data->cpus) > 1)) {
1041 for_each_cpu_mask(j, data->cpus) { 1041 for_each_cpu_mask_nr(j, data->cpus) {
1042 if (j == cpu) 1042 if (j == cpu)
1043 continue; 1043 continue;
1044 dprintk("removing link for cpu %u\n", j); 1044 dprintk("removing link for cpu %u\n", j);
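
The cpufreq conversions are mechanical, but the effect is worth spelling out: both iterators visit exactly the same set bits, while the _nr form stops scanning at nr_cpu_ids rather than NR_CPUS. A sketch against the patched header, for illustration only:

    #include <linux/cpumask.h>

    /* With NR_CPUS=4096 but nr_cpu_ids=8, the _nr loop examines 8 bit
     * positions per full pass instead of 4096. */
    static unsigned int count_policy_cpus(const cpumask_t *policy_cpus)
    {
            unsigned int n = 0;
            int cpu;

            for_each_cpu_mask_nr(cpu, *policy_cpus)
                    n++;
            return n;       /* same value as cpus_weight_nr(*policy_cpus) */
    }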
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5d3a04ba6ad2..fe565ee43757 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
497 return rc; 497 return rc;
498 } 498 }
499 499
500 for_each_cpu_mask(j, policy->cpus) { 500 for_each_cpu_mask_nr(j, policy->cpus) {
501 struct cpu_dbs_info_s *j_dbs_info; 501 struct cpu_dbs_info_s *j_dbs_info;
502 j_dbs_info = &per_cpu(cpu_dbs_info, j); 502 j_dbs_info = &per_cpu(cpu_dbs_info, j);
503 j_dbs_info->cur_policy = policy; 503 j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d2af20dda382..33855cb3cf16 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
367 367
368 /* Get Idle Time */ 368 /* Get Idle Time */
369 idle_ticks = UINT_MAX; 369 idle_ticks = UINT_MAX;
370 for_each_cpu_mask(j, policy->cpus) { 370 for_each_cpu_mask_nr(j, policy->cpus) {
371 cputime64_t total_idle_ticks; 371 cputime64_t total_idle_ticks;
372 unsigned int tmp_idle_ticks; 372 unsigned int tmp_idle_ticks;
373 struct cpu_dbs_info_s *j_dbs_info; 373 struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
521 return rc; 521 return rc;
522 } 522 }
523 523
524 for_each_cpu_mask(j, policy->cpus) { 524 for_each_cpu_mask_nr(j, policy->cpus) {
525 struct cpu_dbs_info_s *j_dbs_info; 525 struct cpu_dbs_info_s *j_dbs_info;
526 j_dbs_info = &per_cpu(cpu_dbs_info, j); 526 j_dbs_info = &per_cpu(cpu_dbs_info, j);
527 j_dbs_info->cur_policy = policy; 527 j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index cb2ac01a41a1..32244aa7cc0c 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -30,16 +30,18 @@
30/** 30/**
31 * A few values needed by the userspace governor 31 * A few values needed by the userspace governor
32 */ 32 */
33static unsigned int cpu_max_freq[NR_CPUS]; 33static DEFINE_PER_CPU(unsigned int, cpu_max_freq);
34static unsigned int cpu_min_freq[NR_CPUS]; 34static DEFINE_PER_CPU(unsigned int, cpu_min_freq);
35static unsigned int cpu_cur_freq[NR_CPUS]; /* current CPU freq */ 35static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */
36static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */ 36static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
37static unsigned int cpu_is_managed[NR_CPUS]; 37 userspace */
38static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
38 39
39static DEFINE_MUTEX (userspace_mutex); 40static DEFINE_MUTEX (userspace_mutex);
40static int cpus_using_userspace_governor; 41static int cpus_using_userspace_governor;
41 42
42#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) 43#define dprintk(msg...) \
44 cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
43 45
44/* keep track of frequency transitions */ 46/* keep track of frequency transitions */
45static int 47static int
@@ -48,12 +50,12 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
48{ 50{
49 struct cpufreq_freqs *freq = data; 51 struct cpufreq_freqs *freq = data;
50 52
51 if (!cpu_is_managed[freq->cpu]) 53 if (!per_cpu(cpu_is_managed, freq->cpu))
52 return 0; 54 return 0;
53 55
54 dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", 56 dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n",
55 freq->cpu, freq->new); 57 freq->cpu, freq->new);
56 cpu_cur_freq[freq->cpu] = freq->new; 58 per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
57 59
58 return 0; 60 return 0;
59} 61}
@@ -77,15 +79,15 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
77 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); 79 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
78 80
79 mutex_lock(&userspace_mutex); 81 mutex_lock(&userspace_mutex);
80 if (!cpu_is_managed[policy->cpu]) 82 if (!per_cpu(cpu_is_managed, policy->cpu))
81 goto err; 83 goto err;
82 84
83 cpu_set_freq[policy->cpu] = freq; 85 per_cpu(cpu_set_freq, policy->cpu) = freq;
84 86
85 if (freq < cpu_min_freq[policy->cpu]) 87 if (freq < per_cpu(cpu_min_freq, policy->cpu))
86 freq = cpu_min_freq[policy->cpu]; 88 freq = per_cpu(cpu_min_freq, policy->cpu);
87 if (freq > cpu_max_freq[policy->cpu]) 89 if (freq > per_cpu(cpu_max_freq, policy->cpu))
88 freq = cpu_max_freq[policy->cpu]; 90 freq = per_cpu(cpu_max_freq, policy->cpu);
89 91
90 /* 92 /*
91 * We're safe from concurrent calls to ->target() here 93 * We're safe from concurrent calls to ->target() here
@@ -104,7 +106,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
104 106
105static ssize_t show_speed(struct cpufreq_policy *policy, char *buf) 107static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
106{ 108{
107 return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]); 109 return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu));
108} 110}
109 111
110static int cpufreq_governor_userspace(struct cpufreq_policy *policy, 112static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
@@ -127,12 +129,17 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
127 } 129 }
128 cpus_using_userspace_governor++; 130 cpus_using_userspace_governor++;
129 131
130 cpu_is_managed[cpu] = 1; 132 per_cpu(cpu_is_managed, cpu) = 1;
131 cpu_min_freq[cpu] = policy->min; 133 per_cpu(cpu_min_freq, cpu) = policy->min;
132 cpu_max_freq[cpu] = policy->max; 134 per_cpu(cpu_max_freq, cpu) = policy->max;
133 cpu_cur_freq[cpu] = policy->cur; 135 per_cpu(cpu_cur_freq, cpu) = policy->cur;
134 cpu_set_freq[cpu] = policy->cur; 136 per_cpu(cpu_set_freq, cpu) = policy->cur;
135 dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]); 137 dprintk("managing cpu %u started "
138 "(%u - %u kHz, currently %u kHz)\n",
139 cpu,
140 per_cpu(cpu_min_freq, cpu),
141 per_cpu(cpu_max_freq, cpu),
142 per_cpu(cpu_cur_freq, cpu));
136 143
137 mutex_unlock(&userspace_mutex); 144 mutex_unlock(&userspace_mutex);
138 break; 145 break;
@@ -145,34 +152,34 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
145 CPUFREQ_TRANSITION_NOTIFIER); 152 CPUFREQ_TRANSITION_NOTIFIER);
146 } 153 }
147 154
148 cpu_is_managed[cpu] = 0; 155 per_cpu(cpu_is_managed, cpu) = 0;
149 cpu_min_freq[cpu] = 0; 156 per_cpu(cpu_min_freq, cpu) = 0;
150 cpu_max_freq[cpu] = 0; 157 per_cpu(cpu_max_freq, cpu) = 0;
151 cpu_set_freq[cpu] = 0; 158 per_cpu(cpu_set_freq, cpu) = 0;
152 dprintk("managing cpu %u stopped\n", cpu); 159 dprintk("managing cpu %u stopped\n", cpu);
153 mutex_unlock(&userspace_mutex); 160 mutex_unlock(&userspace_mutex);
154 break; 161 break;
155 case CPUFREQ_GOV_LIMITS: 162 case CPUFREQ_GOV_LIMITS:
156 mutex_lock(&userspace_mutex); 163 mutex_lock(&userspace_mutex);
157 dprintk("limit event for cpu %u: %u - %u kHz," 164 dprintk("limit event for cpu %u: %u - %u kHz, "
158 "currently %u kHz, last set to %u kHz\n", 165 "currently %u kHz, last set to %u kHz\n",
159 cpu, policy->min, policy->max, 166 cpu, policy->min, policy->max,
160 cpu_cur_freq[cpu], cpu_set_freq[cpu]); 167 per_cpu(cpu_cur_freq, cpu),
161 if (policy->max < cpu_set_freq[cpu]) { 168 per_cpu(cpu_set_freq, cpu));
169 if (policy->max < per_cpu(cpu_set_freq, cpu)) {
162 __cpufreq_driver_target(policy, policy->max, 170 __cpufreq_driver_target(policy, policy->max,
163 CPUFREQ_RELATION_H); 171 CPUFREQ_RELATION_H);
164 } 172 } else if (policy->min > per_cpu(cpu_set_freq, cpu)) {
165 else if (policy->min > cpu_set_freq[cpu]) {
166 __cpufreq_driver_target(policy, policy->min, 173 __cpufreq_driver_target(policy, policy->min,
167 CPUFREQ_RELATION_L); 174 CPUFREQ_RELATION_L);
168 } 175 } else {
169 else { 176 __cpufreq_driver_target(policy,
170 __cpufreq_driver_target(policy, cpu_set_freq[cpu], 177 per_cpu(cpu_set_freq, cpu),
171 CPUFREQ_RELATION_L); 178 CPUFREQ_RELATION_L);
172 } 179 }
173 cpu_min_freq[cpu] = policy->min; 180 per_cpu(cpu_min_freq, cpu) = policy->min;
174 cpu_max_freq[cpu] = policy->max; 181 per_cpu(cpu_max_freq, cpu) = policy->max;
175 cpu_cur_freq[cpu] = policy->cur; 182 per_cpu(cpu_cur_freq, cpu) = policy->cur;
176 mutex_unlock(&userspace_mutex); 183 mutex_unlock(&userspace_mutex);
177 break; 184 break;
178 } 185 }
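
The userspace-governor change swaps five NR_CPUS-sized arrays for per-CPU variables, so storage is instantiated once per possible CPU (and lands in node-local memory) instead of being statically sized for the largest supported machine. The two access patterns side by side, with hypothetical names:

    #include <linux/percpu.h>

    static unsigned int old_cur_freq[NR_CPUS];          /* before: flat array */
    static DEFINE_PER_CPU(unsigned int, new_cur_freq);  /* after */

    static void record_freq(unsigned int cpu, unsigned int khz)
    {
            old_cur_freq[cpu] = khz;                /* indexed array */
            per_cpu(new_cur_freq, cpu) = khz;       /* per-CPU accessor */
    }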
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 25918f7dfd0f..0b624e927a6f 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
254static int smi_request(struct smi_cmd *smi_cmd) 254static int smi_request(struct smi_cmd *smi_cmd)
255{ 255{
256 cpumask_t old_mask; 256 cpumask_t old_mask;
257 cpumask_of_cpu_ptr(new_mask, 0);
257 int ret = 0; 258 int ret = 0;
258 259
259 if (smi_cmd->magic != SMI_CMD_MAGIC) { 260 if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
264 265
265 /* SMI requires CPU 0 */ 266 /* SMI requires CPU 0 */
266 old_mask = current->cpus_allowed; 267 old_mask = current->cpus_allowed;
267 set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); 268 set_cpus_allowed_ptr(current, new_mask);
268 if (smp_processor_id() != 0) { 269 if (smp_processor_id() != 0) {
269 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", 270 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
270 __func__); 271 __func__);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 0792d930c481..7a64aa9b51b6 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -646,8 +646,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
646 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); 646 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
647 647
648 spin_lock_irqsave(&pool->last_cpu_lock, flags); 648 spin_lock_irqsave(&pool->last_cpu_lock, flags);
649 cpu = next_cpu(pool->last_cpu, cpu_online_map); 649 cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
650 if (cpu == NR_CPUS) 650 if (cpu >= nr_cpu_ids)
651 cpu = first_cpu(cpu_online_map); 651 cpu = first_cpu(cpu_online_map);
652 pool->last_cpu = cpu; 652 pool->last_cpu = cpu;
653 spin_unlock_irqrestore(&pool->last_cpu_lock, flags); 653 spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
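
The ehca hunk is the canonical wrap-around conversion: next_cpu_nr() runs off the end of the map at nr_cpu_ids rather than NR_CPUS, so the sentinel test must change with it — and '>=' rather than '==' is the safe comparison. The round-robin idiom in isolation, as a sketch:

    #include <linux/cpumask.h>

    /* Pick the next online CPU after *last, wrapping to the first
     * online CPU when the scan falls off the end of the map. */
    static int pick_next_online_cpu(int *last)
    {
            int cpu = next_cpu_nr(*last, cpu_online_map);

            if (cpu >= nr_cpu_ids)          /* was: cpu == NR_CPUS */
                    cpu = first_cpu(cpu_online_map);
            *last = cpu;
            return cpu;
    }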
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 08256ed0d9a6..579b01ff82d4 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -229,10 +229,11 @@ xpc_hb_checker(void *ignore)
229 int last_IRQ_count = 0; 229 int last_IRQ_count = 0;
230 int new_IRQ_count; 230 int new_IRQ_count;
231 int force_IRQ = 0; 231 int force_IRQ = 0;
232 cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
232 233
233 /* this thread was marked active by xpc_hb_init() */ 234 /* this thread was marked active by xpc_hb_init() */
234 235
235 set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); 236 set_cpus_allowed_ptr(current, cpumask);
236 237
237 /* set our heartbeating to other partitions into motion */ 238 /* set our heartbeating to other partitions into motion */
238 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); 239 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index 196d63c28aa4..bb1c09f7a76c 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -122,7 +122,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
122 * - mbligh 122 * - mbligh
123 */ 123 */
124 local_irq_save(flags); 124 local_irq_save(flags);
125 for_each_cpu_mask(query_cpu, mask) { 125 for_each_cpu_mask_nr(query_cpu, mask) {
126 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), 126 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
127 vector, APIC_DEST_PHYSICAL); 127 vector, APIC_DEST_PHYSICAL);
128 } 128 }
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 55402d2ab938..f92315ed68e1 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -134,7 +134,7 @@ extern __u32 cleared_cpu_caps[NCAPINTS];
134#ifdef CONFIG_SMP 134#ifdef CONFIG_SMP
135DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); 135DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
136#define cpu_data(cpu) per_cpu(cpu_info, cpu) 136#define cpu_data(cpu) per_cpu(cpu_info, cpu)
137#define current_cpu_data cpu_data(smp_processor_id()) 137#define current_cpu_data __get_cpu_var(cpu_info)
138#else 138#else
139#define cpu_data(cpu) boot_cpu_data 139#define cpu_data(cpu) boot_cpu_data
140#define current_cpu_data boot_cpu_data 140#define current_cpu_data boot_cpu_data
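
The processor.h change is equivalence-preserving: cpu_data(smp_processor_id()) and __get_cpu_var(cpu_info) name the same per-CPU structure, but the latter reads through this CPU's per-CPU base directly instead of first computing an index from smp_processor_id(). A sketch (preemption must be disabled either way):

    #include <asm/processor.h>

    static unsigned int local_cpu_family(void)
    {
            /* old: return cpu_data(smp_processor_id()).x86; */
            return __get_cpu_var(cpu_info).x86;     /* new current_cpu_data */
    }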
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c24875bd9c5b..30d59d1d0626 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -17,6 +17,20 @@
17 * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. 17 * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
18 * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. 18 * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
19 * 19 *
20 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
21 * Note: The alternate operations with the suffix "_nr" are used
22 * to limit the range of the loop to nr_cpu_ids instead of
23 * NR_CPUS when NR_CPUS > 64 for performance reasons.
24 * If NR_CPUS is <= 64 then most assembler bitmask
25 * operators execute faster with a constant range, so
26 * the operator will continue to use NR_CPUS.
27 *
28 * Another consideration is that nr_cpu_ids is initialized
29 * to NR_CPUS and isn't lowered until the possible cpus are
30 * discovered (including any disabled cpus). So early uses
31 * will span the entire range of NR_CPUS.
32 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
33 *
20 * The available cpumask operations are: 34 * The available cpumask operations are:
21 * 35 *
22 * void cpu_set(cpu, mask) turn on bit 'cpu' in mask 36 * void cpu_set(cpu, mask) turn on bit 'cpu' in mask
@@ -38,18 +52,60 @@
38 * int cpus_empty(mask) Is mask empty (no bits sets)? 52 * int cpus_empty(mask) Is mask empty (no bits sets)?
39 * int cpus_full(mask) Is mask full (all bits sets)? 53 * int cpus_full(mask) Is mask full (all bits sets)?
 40 * int cpus_weight(mask) Hamming weight - number of set bits 54 * int cpus_weight(mask) Hamming weight - number of set bits
55 * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS
41 * 56 *
42 * void cpus_shift_right(dst, src, n) Shift right 57 * void cpus_shift_right(dst, src, n) Shift right
43 * void cpus_shift_left(dst, src, n) Shift left 58 * void cpus_shift_left(dst, src, n) Shift left
44 * 59 *
45 * int first_cpu(mask) Number lowest set bit, or NR_CPUS 60 * int first_cpu(mask) Number lowest set bit, or NR_CPUS
46 * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS 61 * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
62 * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
47 * 63 *
48 * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set 64 * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
65 *ifdef CONFIG_HAS_CPUMASK_OF_CPU
66 * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v
67 * cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu]
68 * cpumask_of_cpu_ptr(v, cpu) Combines above two operations
69 *else
70 * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v
71 * cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu)
72 * cpumask_of_cpu_ptr(v, cpu) Combines above two operations
73 *endif
49 * CPU_MASK_ALL Initializer - all bits set 74 * CPU_MASK_ALL Initializer - all bits set
50 * CPU_MASK_NONE Initializer - no bits set 75 * CPU_MASK_NONE Initializer - no bits set
51 * unsigned long *cpus_addr(mask) Array of unsigned long's in mask 76 * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
52 * 77 *
78 * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t
79 * variables, and CPUMASK_PTR provides pointers to each field.
80 *
81 * The structure should be defined something like this:
82 * struct my_cpumasks {
83 * cpumask_t mask1;
84 * cpumask_t mask2;
85 * };
86 *
87 * Usage is then:
88 * CPUMASK_ALLOC(my_cpumasks);
89 * CPUMASK_PTR(mask1, my_cpumasks);
90 * CPUMASK_PTR(mask2, my_cpumasks);
91 *
92 * --- DO NOT reference cpumask_t pointers until this check ---
93 * if (my_cpumasks == NULL)
94 * "kmalloc failed"...
95 *
96 * References are now pointers to the cpumask_t variables (*mask1, ...)
97 *
98 *if NR_CPUS > BITS_PER_LONG
99 * CPUMASK_ALLOC(m) Declares and allocates struct m *m =
100 * kmalloc(sizeof(*m), GFP_KERNEL)
101 * CPUMASK_FREE(m) Macro for kfree(m)
102 *else
103 * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m
104 * CPUMASK_FREE(m) Nop
105 *endif
106 * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v)
107 * ------------------------------------------------------------------------
108 *
53 * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing 109 * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
54 * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask 110 * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask
55 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing 111 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
@@ -59,7 +115,8 @@
59 * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap 115 * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap
60 * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz 116 * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz
61 * 117 *
62 * for_each_cpu_mask(cpu, mask) for-loop cpu over mask 118 * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS
119 * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids
63 * 120 *
64 * int num_online_cpus() Number of online CPUs 121 * int num_online_cpus() Number of online CPUs
65 * int num_possible_cpus() Number of all possible CPUs 122 * int num_possible_cpus() Number of all possible CPUs
@@ -216,23 +273,19 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
216 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); 273 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
217} 274}
218 275
219#ifdef CONFIG_SMP
220int __first_cpu(const cpumask_t *srcp);
221#define first_cpu(src) __first_cpu(&(src))
222int __next_cpu(int n, const cpumask_t *srcp);
223#define next_cpu(n, src) __next_cpu((n), &(src))
224#else
225#define first_cpu(src) ({ (void)(src); 0; })
226#define next_cpu(n, src) ({ (void)(src); 1; })
227#endif
228 276
229#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP 277#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
230extern cpumask_t *cpumask_of_cpu_map; 278extern cpumask_t *cpumask_of_cpu_map;
231#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) 279#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu])
232 280#define cpumask_of_cpu_ptr(v, cpu) \
281 const cpumask_t *v = &cpumask_of_cpu(cpu)
282#define cpumask_of_cpu_ptr_declare(v) \
283 const cpumask_t *v
284#define cpumask_of_cpu_ptr_next(v, cpu) \
285 v = &cpumask_of_cpu(cpu)
233#else 286#else
234#define cpumask_of_cpu(cpu) \ 287#define cpumask_of_cpu(cpu) \
235(*({ \ 288({ \
236 typeof(_unused_cpumask_arg_) m; \ 289 typeof(_unused_cpumask_arg_) m; \
237 if (sizeof(m) == sizeof(unsigned long)) { \ 290 if (sizeof(m) == sizeof(unsigned long)) { \
238 m.bits[0] = 1UL<<(cpu); \ 291 m.bits[0] = 1UL<<(cpu); \
@@ -240,8 +293,16 @@ extern cpumask_t *cpumask_of_cpu_map;
240 cpus_clear(m); \ 293 cpus_clear(m); \
241 cpu_set((cpu), m); \ 294 cpu_set((cpu), m); \
242 } \ 295 } \
243 &m; \ 296 m; \
244})) 297})
298#define cpumask_of_cpu_ptr(v, cpu) \
299 cpumask_t _##v = cpumask_of_cpu(cpu); \
300 const cpumask_t *v = &_##v
301#define cpumask_of_cpu_ptr_declare(v) \
302 cpumask_t _##v; \
303 const cpumask_t *v = &_##v
304#define cpumask_of_cpu_ptr_next(v, cpu) \
305 _##v = cpumask_of_cpu(cpu)
245#endif 306#endif
246 307
247#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) 308#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
@@ -281,6 +342,15 @@ extern cpumask_t cpu_mask_all;
281 342
282#define cpus_addr(src) ((src).bits) 343#define cpus_addr(src) ((src).bits)
283 344
345#if NR_CPUS > BITS_PER_LONG
346#define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
347#define CPUMASK_FREE(m) kfree(m)
348#else
349#define CPUMASK_ALLOC(m) struct m _m, *m = &_m
350#define CPUMASK_FREE(m)
351#endif
352#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v)
353
284#define cpumask_scnprintf(buf, len, src) \ 354#define cpumask_scnprintf(buf, len, src) \
285 __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) 355 __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
286static inline int __cpumask_scnprintf(char *buf, int len, 356static inline int __cpumask_scnprintf(char *buf, int len,
@@ -343,20 +413,49 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
343 bitmap_fold(dstp->bits, origp->bits, sz, nbits); 413 bitmap_fold(dstp->bits, origp->bits, sz, nbits);
344} 414}
345 415
346#if NR_CPUS > 1 416#if NR_CPUS == 1
347#define for_each_cpu_mask(cpu, mask) \ 417
348 for ((cpu) = first_cpu(mask); \ 418#define nr_cpu_ids 1
349 (cpu) < NR_CPUS; \ 419#define first_cpu(src) ({ (void)(src); 0; })
350 (cpu) = next_cpu((cpu), (mask))) 420#define next_cpu(n, src) ({ (void)(src); 1; })
351#else /* NR_CPUS == 1 */ 421#define any_online_cpu(mask) 0
352#define for_each_cpu_mask(cpu, mask) \ 422#define for_each_cpu_mask(cpu, mask) \
353 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 423 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
354#endif /* NR_CPUS */ 424
425#else /* NR_CPUS > 1 */
426
427extern int nr_cpu_ids;
428int __first_cpu(const cpumask_t *srcp);
429int __next_cpu(int n, const cpumask_t *srcp);
430int __any_online_cpu(const cpumask_t *mask);
431
432#define first_cpu(src) __first_cpu(&(src))
433#define next_cpu(n, src) __next_cpu((n), &(src))
434#define any_online_cpu(mask) __any_online_cpu(&(mask))
435#define for_each_cpu_mask(cpu, mask) \
436 for ((cpu) = -1; \
437 (cpu) = next_cpu((cpu), (mask)), \
438 (cpu) < NR_CPUS; )
439#endif
440
441#if NR_CPUS <= 64
355 442
356#define next_cpu_nr(n, src) next_cpu(n, src) 443#define next_cpu_nr(n, src) next_cpu(n, src)
357#define cpus_weight_nr(cpumask) cpus_weight(cpumask) 444#define cpus_weight_nr(cpumask) cpus_weight(cpumask)
358#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) 445#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
359 446
447#else /* NR_CPUS > 64 */
448
449int __next_cpu_nr(int n, const cpumask_t *srcp);
450#define next_cpu_nr(n, src) __next_cpu_nr((n), &(src))
451#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
452#define for_each_cpu_mask_nr(cpu, mask) \
453 for ((cpu) = -1; \
454 (cpu) = next_cpu_nr((cpu), (mask)), \
455 (cpu) < nr_cpu_ids; )
456
457#endif /* NR_CPUS > 64 */
458
360/* 459/*
361 * The following particular system cpumasks and operations manage 460 * The following particular system cpumasks and operations manage
362 * possible, present and online cpus. Each of them is a fixed size 461 * possible, present and online cpus. Each of them is a fixed size
@@ -418,9 +517,9 @@ extern cpumask_t cpu_online_map;
418extern cpumask_t cpu_present_map; 517extern cpumask_t cpu_present_map;
419 518
420#if NR_CPUS > 1 519#if NR_CPUS > 1
421#define num_online_cpus() cpus_weight(cpu_online_map) 520#define num_online_cpus() cpus_weight_nr(cpu_online_map)
422#define num_possible_cpus() cpus_weight(cpu_possible_map) 521#define num_possible_cpus() cpus_weight_nr(cpu_possible_map)
423#define num_present_cpus() cpus_weight(cpu_present_map) 522#define num_present_cpus() cpus_weight_nr(cpu_present_map)
424#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) 523#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map)
425#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) 524#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map)
426#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) 525#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map)
@@ -435,17 +534,8 @@ extern cpumask_t cpu_present_map;
435 534
436#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) 535#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
437 536
438#ifdef CONFIG_SMP 537#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map)
439extern int nr_cpu_ids; 538#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map)
440#define any_online_cpu(mask) __any_online_cpu(&(mask)) 539#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map)
441int __any_online_cpu(const cpumask_t *mask);
442#else
443#define nr_cpu_ids 1
444#define any_online_cpu(mask) 0
445#endif
446
447#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
448#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
449#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
450 540
451#endif /* __LINUX_CPUMASK_H */ 541#endif /* __LINUX_CPUMASK_H */
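
Tying the new cpumask.h helpers together: CPUMASK_ALLOC moves large mask worksets off the stack only when NR_CPUS > BITS_PER_LONG, CPUMASK_PTR names the individual fields, and the _nr iterator bounds the walk by nr_cpu_ids. A worked sketch with a hypothetical two-mask workset, following the usage documented in the comment block above:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct my_masks {               /* hypothetical composite */
            cpumask_t scratch;
            cpumask_t covered;
    };

    static int count_online(void)
    {
            int cpu, n = 0;
            CPUMASK_ALLOC(my_masks);        /* kmalloc'd iff NR_CPUS > BITS_PER_LONG */
            CPUMASK_PTR(scratch, my_masks);

            if (my_masks == NULL)           /* check before touching *scratch */
                    return -ENOMEM;

            cpus_and(*scratch, cpu_online_map, cpu_possible_map);
            for_each_cpu_mask_nr(cpu, *scratch)     /* bounded by nr_cpu_ids */
                    n++;

            CPUMASK_FREE(my_masks);         /* no-op in the on-stack case */
            return n;
    }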
diff --git a/kernel/cpu.c b/kernel/cpu.c
index cfb1d43ab801..d26d0b095b3b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -413,7 +413,7 @@ void __ref enable_nonboot_cpus(void)
413 goto out; 413 goto out;
414 414
415 printk("Enabling non-boot CPUs ...\n"); 415 printk("Enabling non-boot CPUs ...\n");
416 for_each_cpu_mask(cpu, frozen_cpus) { 416 for_each_cpu_mask_nr(cpu, frozen_cpus) {
417 error = _cpu_up(cpu, 1); 417 error = _cpu_up(cpu, 1);
418 if (!error) { 418 if (!error) {
419 printk("CPU%d is up\n", cpu); 419 printk("CPU%d is up\n", cpu);
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 16eeeaa9d618..6f8696c502f4 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
106 */ 106 */
107 cpus_and(cpumask, rcp->cpumask, cpu_online_map); 107 cpus_and(cpumask, rcp->cpumask, cpu_online_map);
108 cpu_clear(rdp->cpu, cpumask); 108 cpu_clear(rdp->cpu, cpumask);
109 for_each_cpu_mask(cpu, cpumask) 109 for_each_cpu_mask_nr(cpu, cpumask)
110 smp_send_reschedule(cpu); 110 smp_send_reschedule(cpu);
111 } 111 }
112} 112}
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 6f62b77d93c4..27827931ca0d 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -756,7 +756,7 @@ rcu_try_flip_idle(void)
756 756
757 /* Now ask each CPU for acknowledgement of the flip. */ 757 /* Now ask each CPU for acknowledgement of the flip. */
758 758
759 for_each_cpu_mask(cpu, rcu_cpu_online_map) { 759 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
760 per_cpu(rcu_flip_flag, cpu) = rcu_flipped; 760 per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
761 dyntick_save_progress_counter(cpu); 761 dyntick_save_progress_counter(cpu);
762 } 762 }
@@ -774,7 +774,7 @@ rcu_try_flip_waitack(void)
774 int cpu; 774 int cpu;
775 775
776 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); 776 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
777 for_each_cpu_mask(cpu, rcu_cpu_online_map) 777 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
778 if (rcu_try_flip_waitack_needed(cpu) && 778 if (rcu_try_flip_waitack_needed(cpu) &&
779 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { 779 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
780 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); 780 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -806,7 +806,7 @@ rcu_try_flip_waitzero(void)
806 /* Check to see if the sum of the "last" counters is zero. */ 806 /* Check to see if the sum of the "last" counters is zero. */
807 807
808 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); 808 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
809 for_each_cpu_mask(cpu, rcu_cpu_online_map) 809 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
810 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; 810 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
811 if (sum != 0) { 811 if (sum != 0) {
812 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); 812 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -821,7 +821,7 @@ rcu_try_flip_waitzero(void)
821 smp_mb(); /* ^^^^^^^^^^^^ */ 821 smp_mb(); /* ^^^^^^^^^^^^ */
822 822
823 /* Call for a memory barrier from each CPU. */ 823 /* Call for a memory barrier from each CPU. */
824 for_each_cpu_mask(cpu, rcu_cpu_online_map) { 824 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
825 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; 825 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
826 dyntick_save_progress_counter(cpu); 826 dyntick_save_progress_counter(cpu);
827 } 827 }
@@ -841,7 +841,7 @@ rcu_try_flip_waitmb(void)
841 int cpu; 841 int cpu;
842 842
843 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); 843 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
844 for_each_cpu_mask(cpu, rcu_cpu_online_map) 844 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
845 if (rcu_try_flip_waitmb_needed(cpu) && 845 if (rcu_try_flip_waitmb_needed(cpu) &&
846 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { 846 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
847 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); 847 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
diff --git a/kernel/sched.c b/kernel/sched.c
index 99e6d850ecab..a6d0a7f3f7c3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2108,7 +2108,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
2108 /* Tally up the load of all CPUs in the group */ 2108 /* Tally up the load of all CPUs in the group */
2109 avg_load = 0; 2109 avg_load = 0;
2110 2110
2111 for_each_cpu_mask(i, group->cpumask) { 2111 for_each_cpu_mask_nr(i, group->cpumask) {
2112 /* Bias balancing toward cpus of our domain */ 2112 /* Bias balancing toward cpus of our domain */
2113 if (local_group) 2113 if (local_group)
2114 load = source_load(i, load_idx); 2114 load = source_load(i, load_idx);
@@ -2150,7 +2150,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
2150 /* Traverse only the allowed CPUs */ 2150 /* Traverse only the allowed CPUs */
2151 cpus_and(*tmp, group->cpumask, p->cpus_allowed); 2151 cpus_and(*tmp, group->cpumask, p->cpus_allowed);
2152 2152
2153 for_each_cpu_mask(i, *tmp) { 2153 for_each_cpu_mask_nr(i, *tmp) {
2154 load = weighted_cpuload(i); 2154 load = weighted_cpuload(i);
2155 2155
2156 if (load < min_load || (load == min_load && i == this_cpu)) { 2156 if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3168,7 +3168,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3168 max_cpu_load = 0; 3168 max_cpu_load = 0;
3169 min_cpu_load = ~0UL; 3169 min_cpu_load = ~0UL;
3170 3170
3171 for_each_cpu_mask(i, group->cpumask) { 3171 for_each_cpu_mask_nr(i, group->cpumask) {
3172 struct rq *rq; 3172 struct rq *rq;
3173 3173
3174 if (!cpu_isset(i, *cpus)) 3174 if (!cpu_isset(i, *cpus))
@@ -3447,7 +3447,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
3447 unsigned long max_load = 0; 3447 unsigned long max_load = 0;
3448 int i; 3448 int i;
3449 3449
3450 for_each_cpu_mask(i, group->cpumask) { 3450 for_each_cpu_mask_nr(i, group->cpumask) {
3451 unsigned long wl; 3451 unsigned long wl;
3452 3452
3453 if (!cpu_isset(i, *cpus)) 3453 if (!cpu_isset(i, *cpus))
@@ -3989,7 +3989,7 @@ static void run_rebalance_domains(struct softirq_action *h)
3989 int balance_cpu; 3989 int balance_cpu;
3990 3990
3991 cpu_clear(this_cpu, cpus); 3991 cpu_clear(this_cpu, cpus);
3992 for_each_cpu_mask(balance_cpu, cpus) { 3992 for_each_cpu_mask_nr(balance_cpu, cpus) {
3993 /* 3993 /*
3994 * If this cpu gets work to do, stop the load balancing 3994 * If this cpu gets work to do, stop the load balancing
3995 * work being done for other cpus. Next load 3995 * work being done for other cpus. Next load
@@ -6802,7 +6802,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
6802 6802
6803 cpus_clear(*covered); 6803 cpus_clear(*covered);
6804 6804
6805 for_each_cpu_mask(i, *span) { 6805 for_each_cpu_mask_nr(i, *span) {
6806 struct sched_group *sg; 6806 struct sched_group *sg;
6807 int group = group_fn(i, cpu_map, &sg, tmpmask); 6807 int group = group_fn(i, cpu_map, &sg, tmpmask);
6808 int j; 6808 int j;
@@ -6813,7 +6813,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
6813 cpus_clear(sg->cpumask); 6813 cpus_clear(sg->cpumask);
6814 sg->__cpu_power = 0; 6814 sg->__cpu_power = 0;
6815 6815
6816 for_each_cpu_mask(j, *span) { 6816 for_each_cpu_mask_nr(j, *span) {
6817 if (group_fn(j, cpu_map, NULL, tmpmask) != group) 6817 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
6818 continue; 6818 continue;
6819 6819
@@ -7013,7 +7013,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
7013 if (!sg) 7013 if (!sg)
7014 return; 7014 return;
7015 do { 7015 do {
7016 for_each_cpu_mask(j, sg->cpumask) { 7016 for_each_cpu_mask_nr(j, sg->cpumask) {
7017 struct sched_domain *sd; 7017 struct sched_domain *sd;
7018 7018
7019 sd = &per_cpu(phys_domains, j); 7019 sd = &per_cpu(phys_domains, j);
@@ -7038,7 +7038,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
7038{ 7038{
7039 int cpu, i; 7039 int cpu, i;
7040 7040
7041 for_each_cpu_mask(cpu, *cpu_map) { 7041 for_each_cpu_mask_nr(cpu, *cpu_map) {
7042 struct sched_group **sched_group_nodes 7042 struct sched_group **sched_group_nodes
7043 = sched_group_nodes_bycpu[cpu]; 7043 = sched_group_nodes_bycpu[cpu];
7044 7044
@@ -7277,7 +7277,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7277 /* 7277 /*
7278 * Set up domains for cpus specified by the cpu_map. 7278 * Set up domains for cpus specified by the cpu_map.
7279 */ 7279 */
7280 for_each_cpu_mask(i, *cpu_map) { 7280 for_each_cpu_mask_nr(i, *cpu_map) {
7281 struct sched_domain *sd = NULL, *p; 7281 struct sched_domain *sd = NULL, *p;
7282 SCHED_CPUMASK_VAR(nodemask, allmasks); 7282 SCHED_CPUMASK_VAR(nodemask, allmasks);
7283 7283
@@ -7344,7 +7344,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7344 7344
7345#ifdef CONFIG_SCHED_SMT 7345#ifdef CONFIG_SCHED_SMT
7346 /* Set up CPU (sibling) groups */ 7346 /* Set up CPU (sibling) groups */
7347 for_each_cpu_mask(i, *cpu_map) { 7347 for_each_cpu_mask_nr(i, *cpu_map) {
7348 SCHED_CPUMASK_VAR(this_sibling_map, allmasks); 7348 SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
7349 SCHED_CPUMASK_VAR(send_covered, allmasks); 7349 SCHED_CPUMASK_VAR(send_covered, allmasks);
7350 7350
@@ -7361,7 +7361,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7361 7361
7362#ifdef CONFIG_SCHED_MC 7362#ifdef CONFIG_SCHED_MC
7363 /* Set up multi-core groups */ 7363 /* Set up multi-core groups */
7364 for_each_cpu_mask(i, *cpu_map) { 7364 for_each_cpu_mask_nr(i, *cpu_map) {
7365 SCHED_CPUMASK_VAR(this_core_map, allmasks); 7365 SCHED_CPUMASK_VAR(this_core_map, allmasks);
7366 SCHED_CPUMASK_VAR(send_covered, allmasks); 7366 SCHED_CPUMASK_VAR(send_covered, allmasks);
7367 7367
@@ -7428,7 +7428,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7428 goto error; 7428 goto error;
7429 } 7429 }
7430 sched_group_nodes[i] = sg; 7430 sched_group_nodes[i] = sg;
7431 for_each_cpu_mask(j, *nodemask) { 7431 for_each_cpu_mask_nr(j, *nodemask) {
7432 struct sched_domain *sd; 7432 struct sched_domain *sd;
7433 7433
7434 sd = &per_cpu(node_domains, j); 7434 sd = &per_cpu(node_domains, j);
@@ -7474,21 +7474,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7474 7474
7475 /* Calculate CPU power for physical packages and nodes */ 7475 /* Calculate CPU power for physical packages and nodes */
7476#ifdef CONFIG_SCHED_SMT 7476#ifdef CONFIG_SCHED_SMT
7477 for_each_cpu_mask(i, *cpu_map) { 7477 for_each_cpu_mask_nr(i, *cpu_map) {
7478 struct sched_domain *sd = &per_cpu(cpu_domains, i); 7478 struct sched_domain *sd = &per_cpu(cpu_domains, i);
7479 7479
7480 init_sched_groups_power(i, sd); 7480 init_sched_groups_power(i, sd);
7481 } 7481 }
7482#endif 7482#endif
7483#ifdef CONFIG_SCHED_MC 7483#ifdef CONFIG_SCHED_MC
7484 for_each_cpu_mask(i, *cpu_map) { 7484 for_each_cpu_mask_nr(i, *cpu_map) {
7485 struct sched_domain *sd = &per_cpu(core_domains, i); 7485 struct sched_domain *sd = &per_cpu(core_domains, i);
7486 7486
7487 init_sched_groups_power(i, sd); 7487 init_sched_groups_power(i, sd);
7488 } 7488 }
7489#endif 7489#endif
7490 7490
7491 for_each_cpu_mask(i, *cpu_map) { 7491 for_each_cpu_mask_nr(i, *cpu_map) {
7492 struct sched_domain *sd = &per_cpu(phys_domains, i); 7492 struct sched_domain *sd = &per_cpu(phys_domains, i);
7493 7493
7494 init_sched_groups_power(i, sd); 7494 init_sched_groups_power(i, sd);
@@ -7508,7 +7508,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7508#endif 7508#endif
7509 7509
7510 /* Attach the domains */ 7510 /* Attach the domains */
7511 for_each_cpu_mask(i, *cpu_map) { 7511 for_each_cpu_mask_nr(i, *cpu_map) {
7512 struct sched_domain *sd; 7512 struct sched_domain *sd;
7513#ifdef CONFIG_SCHED_SMT 7513#ifdef CONFIG_SCHED_SMT
7514 sd = &per_cpu(cpu_domains, i); 7514 sd = &per_cpu(cpu_domains, i);
@@ -7603,7 +7603,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
7603 7603
7604 unregister_sched_domain_sysctl(); 7604 unregister_sched_domain_sysctl();
7605 7605
7606 for_each_cpu_mask(i, *cpu_map) 7606 for_each_cpu_mask_nr(i, *cpu_map)
7607 cpu_attach_domain(NULL, &def_root_domain, i); 7607 cpu_attach_domain(NULL, &def_root_domain, i);
7608 synchronize_sched(); 7608 synchronize_sched();
7609 arch_destroy_sched_domains(cpu_map, &tmpmask); 7609 arch_destroy_sched_domains(cpu_map, &tmpmask);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f2aa987027d6..bb61fe26b62c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1031,7 +1031,7 @@ static int wake_idle(int cpu, struct task_struct *p)
1031 || ((sd->flags & SD_WAKE_IDLE_FAR) 1031 || ((sd->flags & SD_WAKE_IDLE_FAR)
1032 && !task_hot(p, task_rq(p)->clock, sd))) { 1032 && !task_hot(p, task_rq(p)->clock, sd))) {
1033 cpus_and(tmp, sd->span, p->cpus_allowed); 1033 cpus_and(tmp, sd->span, p->cpus_allowed);
1034 for_each_cpu_mask(i, tmp) { 1034 for_each_cpu_mask_nr(i, tmp) {
1035 if (idle_cpu(i)) { 1035 if (idle_cpu(i)) {
1036 if (i != task_cpu(p)) { 1036 if (i != task_cpu(p)) {
1037 schedstat_inc(p, 1037 schedstat_inc(p,
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 47ceac9e8552..7c9614728c59 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -240,7 +240,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
240 240
241 spin_lock(&rt_b->rt_runtime_lock); 241 spin_lock(&rt_b->rt_runtime_lock);
242 rt_period = ktime_to_ns(rt_b->rt_period); 242 rt_period = ktime_to_ns(rt_b->rt_period);
243 for_each_cpu_mask(i, rd->span) { 243 for_each_cpu_mask_nr(i, rd->span) {
244 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 244 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
245 s64 diff; 245 s64 diff;
246 246
@@ -1107,7 +1107,7 @@ static int pull_rt_task(struct rq *this_rq)
1107 1107
1108 next = pick_next_task_rt(this_rq); 1108 next = pick_next_task_rt(this_rq);
1109 1109
1110 for_each_cpu_mask(cpu, this_rq->rd->rto_mask) { 1110 for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
1111 if (this_cpu == cpu) 1111 if (this_cpu == cpu)
1112 continue; 1112 continue;
1113 1113
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index ba9b2054ecbd..738b411ff2d3 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -33,8 +33,9 @@ static int stopmachine(void *cpu)
33{ 33{
34 int irqs_disabled = 0; 34 int irqs_disabled = 0;
35 int prepared = 0; 35 int prepared = 0;
36 cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
36 37
37 set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu)); 38 set_cpus_allowed_ptr(current, cpumask);
38 39
39 /* Ack: we are alive */ 40 /* Ack: we are alive */
40 smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */ 41 smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 4a23517169a6..06b17547f4e7 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
301 return -EINVAL; 301 return -EINVAL;
302 302
303 if (isadd == REGISTER) { 303 if (isadd == REGISTER) {
304 for_each_cpu_mask(cpu, mask) { 304 for_each_cpu_mask_nr(cpu, mask) {
305 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, 305 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
306 cpu_to_node(cpu)); 306 cpu_to_node(cpu));
307 if (!s) 307 if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
320 320
321 /* Deregister or cleanup */ 321 /* Deregister or cleanup */
322cleanup: 322cleanup:
323 for_each_cpu_mask(cpu, mask) { 323 for_each_cpu_mask_nr(cpu, mask) {
324 listeners = &per_cpu(listener_array, cpu); 324 listeners = &per_cpu(listener_array, cpu);
325 down_write(&listeners->sem); 325 down_write(&listeners->sem);
326 list_for_each_entry_safe(s, tmp, &listeners->list, list) { 326 list_for_each_entry_safe(s, tmp, &listeners->list, list) {
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index dadde5361f32..60ceabd53f2e 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data)
145 * Cycle through CPUs to check if the CPUs stay 145 * Cycle through CPUs to check if the CPUs stay
146 * synchronized to each other. 146 * synchronized to each other.
147 */ 147 */
148 int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map); 148 int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);
149 149
150 if (next_cpu >= NR_CPUS) 150 if (next_cpu >= nr_cpu_ids)
151 next_cpu = first_cpu(cpu_online_map); 151 next_cpu = first_cpu(cpu_online_map);
152 watchdog_timer.expires += WATCHDOG_INTERVAL; 152 watchdog_timer.expires += WATCHDOG_INTERVAL;
153 add_timer_on(&watchdog_timer, next_cpu); 153 add_timer_on(&watchdog_timer, next_cpu);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f48d0f09d32f..31463d370b94 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -399,8 +399,7 @@ again:
399 mask = CPU_MASK_NONE; 399 mask = CPU_MASK_NONE;
400 now = ktime_get(); 400 now = ktime_get();
401 /* Find all expired events */ 401 /* Find all expired events */
402 for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; 402 for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
403 cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
404 td = &per_cpu(tick_cpu_device, cpu); 403 td = &per_cpu(tick_cpu_device, cpu);
405 if (td->evtdev->next_event.tv64 <= now.tv64) 404 if (td->evtdev->next_event.tv64 <= now.tv64)
406 cpu_set(cpu, mask); 405 cpu_set(cpu, mask);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 4f3886562b8c..bf43284d6855 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -135,7 +135,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
135 */ 135 */
136static void tick_setup_device(struct tick_device *td, 136static void tick_setup_device(struct tick_device *td,
137 struct clock_event_device *newdev, int cpu, 137 struct clock_event_device *newdev, int cpu,
138 cpumask_t cpumask) 138 const cpumask_t *cpumask)
139{ 139{
140 ktime_t next_event; 140 ktime_t next_event;
141 void (*handler)(struct clock_event_device *) = NULL; 141 void (*handler)(struct clock_event_device *) = NULL;
@@ -169,8 +169,8 @@ static void tick_setup_device(struct tick_device *td,
169 * When the device is not per cpu, pin the interrupt to the 169 * When the device is not per cpu, pin the interrupt to the
170 * current cpu: 170 * current cpu:
171 */ 171 */
172 if (!cpus_equal(newdev->cpumask, cpumask)) 172 if (!cpus_equal(newdev->cpumask, *cpumask))
173 irq_set_affinity(newdev->irq, cpumask); 173 irq_set_affinity(newdev->irq, *cpumask);
174 174
175 /* 175 /*
176 * When global broadcasting is active, check if the current 176 * When global broadcasting is active, check if the current
@@ -196,20 +196,20 @@ static int tick_check_new_device(struct clock_event_device *newdev)
196 struct tick_device *td; 196 struct tick_device *td;
197 int cpu, ret = NOTIFY_OK; 197 int cpu, ret = NOTIFY_OK;
198 unsigned long flags; 198 unsigned long flags;
199 cpumask_t cpumask; 199 cpumask_of_cpu_ptr_declare(cpumask);
200 200
201 spin_lock_irqsave(&tick_device_lock, flags); 201 spin_lock_irqsave(&tick_device_lock, flags);
202 202
203 cpu = smp_processor_id(); 203 cpu = smp_processor_id();
204 cpumask_of_cpu_ptr_next(cpumask, cpu);
204 if (!cpu_isset(cpu, newdev->cpumask)) 205 if (!cpu_isset(cpu, newdev->cpumask))
205 goto out_bc; 206 goto out_bc;
206 207
207 td = &per_cpu(tick_cpu_device, cpu); 208 td = &per_cpu(tick_cpu_device, cpu);
208 curdev = td->evtdev; 209 curdev = td->evtdev;
209 cpumask = cpumask_of_cpu(cpu);
210 210
211 /* cpu local device ? */ 211 /* cpu local device ? */
212 if (!cpus_equal(newdev->cpumask, cpumask)) { 212 if (!cpus_equal(newdev->cpumask, *cpumask)) {
213 213
214 /* 214 /*
215 * If the cpu affinity of the device interrupt can not 215 * If the cpu affinity of the device interrupt can not
@@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
222 * If we have a cpu local device already, do not replace it 222 * If we have a cpu local device already, do not replace it
223 * by a non cpu local device 223 * by a non cpu local device
224 */ 224 */
225 if (curdev && cpus_equal(curdev->cpumask, cpumask)) 225 if (curdev && cpus_equal(curdev->cpumask, *cpumask))
226 goto out_bc; 226 goto out_bc;
227 } 227 }
228 228
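
tick_setup_device() now takes const cpumask_t *: with NR_CPUS=4096 a cpumask_t is 512 bytes, so the old by-value parameter copied half a kilobyte onto the stack at every call. The shape of the change, reduced to a sketch with hypothetical function names:

    #include <linux/cpumask.h>

    /* before: each call copies both masks (NR_CPUS/8 bytes apiece) */
    static int is_local_old(cpumask_t dev_mask, cpumask_t cpu_mask)
    {
            return cpus_equal(dev_mask, cpu_mask);
    }

    /* after: only two pointers cross the call boundary */
    static int is_local_new(const cpumask_t *dev_mask,
                            const cpumask_t *cpu_mask)
    {
            return cpus_equal(*dev_mask, *cpu_mask);
    }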
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 2301e1e7c606..63528086337c 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -213,7 +213,9 @@ static void start_stack_timers(void)
213 int cpu; 213 int cpu;
214 214
215 for_each_online_cpu(cpu) { 215 for_each_online_cpu(cpu) {
216 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 216 cpumask_of_cpu_ptr(new_mask, cpu);
217
218 set_cpus_allowed_ptr(current, new_mask);
217 start_stack_timer(cpu); 219 start_stack_timer(cpu);
218 } 220 }
219 set_cpus_allowed_ptr(current, &saved_mask); 221 set_cpus_allowed_ptr(current, &saved_mask);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ce7799540c91..a6d36346d10a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq)
397 might_sleep(); 397 might_sleep();
398 lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); 398 lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
399 lock_release(&wq->lockdep_map, 1, _THIS_IP_); 399 lock_release(&wq->lockdep_map, 1, _THIS_IP_);
400 for_each_cpu_mask(cpu, *cpu_map) 400 for_each_cpu_mask_nr(cpu, *cpu_map)
401 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); 401 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
402} 402}
403EXPORT_SYMBOL_GPL(flush_workqueue); 403EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work)
477 wq = cwq->wq; 477 wq = cwq->wq;
478 cpu_map = wq_cpu_map(wq); 478 cpu_map = wq_cpu_map(wq);
479 479
480 for_each_cpu_mask(cpu, *cpu_map) 480 for_each_cpu_mask_nr(cpu, *cpu_map)
481 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 481 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
482} 482}
483 483
@@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
813 list_del(&wq->list); 813 list_del(&wq->list);
814 spin_unlock(&workqueue_lock); 814 spin_unlock(&workqueue_lock);
815 815
816 for_each_cpu_mask(cpu, *cpu_map) 816 for_each_cpu_mask_nr(cpu, *cpu_map)
817 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); 817 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
818 put_online_cpus(); 818 put_online_cpus();
819 819
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e7..5f97dc25ef9c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
15} 15}
16EXPORT_SYMBOL(__next_cpu); 16EXPORT_SYMBOL(__next_cpu);
17 17
18#if NR_CPUS > 64
19int __next_cpu_nr(int n, const cpumask_t *srcp)
20{
21 return min_t(int, nr_cpu_ids,
22 find_next_bit(srcp->bits, nr_cpu_ids, n+1));
23}
24EXPORT_SYMBOL(__next_cpu_nr);
25#endif
26
18int __any_online_cpu(const cpumask_t *mask) 27int __any_online_cpu(const cpumask_t *mask)
19{ 28{
20 int cpu; 29 int cpu;
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 3b4dc098181e..c4381d9516f6 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -11,7 +11,7 @@ notrace unsigned int debug_smp_processor_id(void)
11{ 11{
12 unsigned long preempt_count = preempt_count(); 12 unsigned long preempt_count = preempt_count();
13 int this_cpu = raw_smp_processor_id(); 13 int this_cpu = raw_smp_processor_id();
14 cpumask_t this_mask; 14 cpumask_of_cpu_ptr_declare(this_mask);
15 15
16 if (likely(preempt_count)) 16 if (likely(preempt_count))
17 goto out; 17 goto out;
@@ -23,9 +23,9 @@ notrace unsigned int debug_smp_processor_id(void)
23 * Kernel threads bound to a single CPU can safely use 23 * Kernel threads bound to a single CPU can safely use
24 * smp_processor_id(): 24 * smp_processor_id():
25 */ 25 */
26 this_mask = cpumask_of_cpu(this_cpu); 26 cpumask_of_cpu_ptr_next(this_mask, this_cpu);
27 27
28 if (cpus_equal(current->cpus_allowed, this_mask)) 28 if (cpus_equal(current->cpus_allowed, *this_mask))
29 goto out; 29 goto out;
30 30
31 /* 31 /*
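
The debug_smp_processor_id() hunk shows the declare/next pair split across declaration and use: the pointer is declared up front with no mask copy on the stack, then bound once the CPU id is known. The affinity check it implements, condensed to a sketch:

    #include <linux/cpumask.h>
    #include <linux/sched.h>
    #include <linux/smp.h>

    /* Is the current task pinned to exactly the CPU it runs on? */
    static int bound_to_this_cpu(void)
    {
            int cpu = raw_smp_processor_id();
            cpumask_of_cpu_ptr_declare(this_mask);

            cpumask_of_cpu_ptr_next(this_mask, cpu);
            return cpus_equal(current->cpus_allowed, *this_mask);
    }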
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 05f2b4009ccc..843364594e23 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(percpu_depopulate);
35void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) 35void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
36{ 36{
37 int cpu; 37 int cpu;
38 for_each_cpu_mask(cpu, *mask) 38 for_each_cpu_mask_nr(cpu, *mask)
39 percpu_depopulate(__pdata, cpu); 39 percpu_depopulate(__pdata, cpu);
40} 40}
41EXPORT_SYMBOL_GPL(__percpu_depopulate_mask); 41EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);
@@ -86,7 +86,7 @@ int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
86 int cpu; 86 int cpu;
87 87
88 cpus_clear(populated); 88 cpus_clear(populated);
89 for_each_cpu_mask(cpu, *mask) 89 for_each_cpu_mask_nr(cpu, *mask)
90 if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { 90 if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
91 __percpu_depopulate_mask(__pdata, &populated); 91 __percpu_depopulate_mask(__pdata, &populated);
92 return -ENOMEM; 92 return -ENOMEM;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index db9eabb2c5b3..c3d4a781802f 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -26,7 +26,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
26 26
27 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long)); 27 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
28 28
29 for_each_cpu_mask(cpu, *cpumask) { 29 for_each_cpu_mask_nr(cpu, *cpumask) {
30 struct vm_event_state *this = &per_cpu(vm_event_states, cpu); 30 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
31 31
32 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 32 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
diff --git a/net/core/dev.c b/net/core/dev.c
index 2eed17bcb2dd..106d5e6d987c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2376,7 +2376,7 @@ out:
2376 */ 2376 */
2377 if (!cpus_empty(net_dma.channel_mask)) { 2377 if (!cpus_empty(net_dma.channel_mask)) {
2378 int chan_idx; 2378 int chan_idx;
2379 for_each_cpu_mask(chan_idx, net_dma.channel_mask) { 2379 for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
2380 struct dma_chan *chan = net_dma.channels[chan_idx]; 2380 struct dma_chan *chan = net_dma.channels[chan_idx];
2381 if (chan) 2381 if (chan)
2382 dma_async_memcpy_issue_pending(chan); 2382 dma_async_memcpy_issue_pending(chan);
@@ -4510,7 +4510,7 @@ static void net_dma_rebalance(struct net_dma *net_dma)
4510 i = 0; 4510 i = 0;
4511 cpu = first_cpu(cpu_online_map); 4511 cpu = first_cpu(cpu_online_map);
4512 4512
4513 for_each_cpu_mask(chan_idx, net_dma->channel_mask) { 4513 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
4514 chan = net_dma->channels[chan_idx]; 4514 chan = net_dma->channels[chan_idx];
4515 4515
4516 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) 4516 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 265b1b289a32..705959b31e24 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -497,7 +497,7 @@ static void iucv_setmask_up(void)
497 /* Disable all cpu but the first in cpu_irq_cpumask. */ 497 /* Disable all cpu but the first in cpu_irq_cpumask. */
498 cpumask = iucv_irq_cpumask; 498 cpumask = iucv_irq_cpumask;
499 cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); 499 cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
500 for_each_cpu_mask(cpu, cpumask) 500 for_each_cpu_mask_nr(cpu, cpumask)
501 smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); 501 smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
502} 502}
503 503
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 5a32cb7c4bb4..835d27413083 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -310,7 +310,8 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
310 switch (m->mode) { 310 switch (m->mode) {
311 case SVC_POOL_PERCPU: 311 case SVC_POOL_PERCPU:
312 { 312 {
313 set_cpus_allowed_ptr(task, &cpumask_of_cpu(node)); 313 cpumask_of_cpu_ptr(cpumask, node);
314 set_cpus_allowed_ptr(task, cpumask);
314 break; 315 break;
315 } 316 }
316 case SVC_POOL_PERNODE: 317 case SVC_POOL_PERNODE: