34 files changed, 175 insertions, 135 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index b0c8208df9fa..dd097b835839 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -202,7 +202,7 @@ static void drv_write(struct drv_cmd *cmd)
202 | cpumask_t saved_mask = current->cpus_allowed; | 202 | cpumask_t saved_mask = current->cpus_allowed; |
203 | unsigned int i; | 203 | unsigned int i; |
204 | 204 | ||
205 | for_each_cpu_mask(i, cmd->mask) { | 205 | for_each_cpu_mask_nr(i, cmd->mask) { |
206 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); | 206 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); |
207 | do_drv_write(cmd); | 207 | do_drv_write(cmd); |
208 | } | 208 | } |
@@ -451,7 +451,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
451 | 451 | ||
452 | freqs.old = perf->states[perf->state].core_frequency * 1000; | 452 | freqs.old = perf->states[perf->state].core_frequency * 1000; |
453 | freqs.new = data->freq_table[next_state].frequency; | 453 | freqs.new = data->freq_table[next_state].frequency; |
454 | for_each_cpu_mask(i, cmd.mask) { | 454 | for_each_cpu_mask_nr(i, cmd.mask) { |
455 | freqs.cpu = i; | 455 | freqs.cpu = i; |
456 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 456 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
457 | } | 457 | } |
@@ -466,7 +466,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
466 | } | 466 | } |
467 | } | 467 | } |
468 | 468 | ||
469 | for_each_cpu_mask(i, cmd.mask) { | 469 | for_each_cpu_mask_nr(i, cmd.mask) { |
470 | freqs.cpu = i; | 470 | freqs.cpu = i; |
471 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 471 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
472 | } | 472 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 199e4e05e5dc..f1685fb91fbd 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
122 | return 0; | 122 | return 0; |
123 | 123 | ||
124 | /* notifiers */ | 124 | /* notifiers */ |
125 | for_each_cpu_mask(i, policy->cpus) { | 125 | for_each_cpu_mask_nr(i, policy->cpus) { |
126 | freqs.cpu = i; | 126 | freqs.cpu = i; |
127 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 127 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
128 | } | 128 | } |
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
130 | /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software | 130 | /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software |
131 | * Developer's Manual, Volume 3 | 131 | * Developer's Manual, Volume 3 |
132 | */ | 132 | */ |
133 | for_each_cpu_mask(i, policy->cpus) | 133 | for_each_cpu_mask_nr(i, policy->cpus) |
134 | cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); | 134 | cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); |
135 | 135 | ||
136 | /* notifiers */ | 136 | /* notifiers */ |
137 | for_each_cpu_mask(i, policy->cpus) { | 137 | for_each_cpu_mask_nr(i, policy->cpus) { |
138 | freqs.cpu = i; | 138 | freqs.cpu = i; |
139 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 139 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
140 | } | 140 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 206791eb46e3..c45ca6d4dce1 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -966,7 +966,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
966 | freqs.old = find_khz_freq_from_fid(data->currfid); | 966 | freqs.old = find_khz_freq_from_fid(data->currfid); |
967 | freqs.new = find_khz_freq_from_fid(fid); | 967 | freqs.new = find_khz_freq_from_fid(fid); |
968 | 968 | ||
969 | for_each_cpu_mask(i, *(data->available_cores)) { | 969 | for_each_cpu_mask_nr(i, *(data->available_cores)) { |
970 | freqs.cpu = i; | 970 | freqs.cpu = i; |
971 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 971 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
972 | } | 972 | } |
@@ -974,7 +974,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
974 | res = transition_fid_vid(data, fid, vid); | 974 | res = transition_fid_vid(data, fid, vid); |
975 | freqs.new = find_khz_freq_from_fid(data->currfid); | 975 | freqs.new = find_khz_freq_from_fid(data->currfid); |
976 | 976 | ||
977 | for_each_cpu_mask(i, *(data->available_cores)) { | 977 | for_each_cpu_mask_nr(i, *(data->available_cores)) { |
978 | freqs.cpu = i; | 978 | freqs.cpu = i; |
979 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 979 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
980 | } | 980 | } |
@@ -997,7 +997,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
997 | freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); | 997 | freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); |
998 | freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); | 998 | freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); |
999 | 999 | ||
1000 | for_each_cpu_mask(i, *(data->available_cores)) { | 1000 | for_each_cpu_mask_nr(i, *(data->available_cores)) { |
1001 | freqs.cpu = i; | 1001 | freqs.cpu = i; |
1002 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 1002 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
1003 | } | 1003 | } |
@@ -1005,7 +1005,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
1005 | res = transition_pstate(data, pstate); | 1005 | res = transition_pstate(data, pstate); |
1006 | freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); | 1006 | freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); |
1007 | 1007 | ||
1008 | for_each_cpu_mask(i, *(data->available_cores)) { | 1008 | for_each_cpu_mask_nr(i, *(data->available_cores)) { |
1009 | freqs.cpu = i; | 1009 | freqs.cpu = i; |
1010 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 1010 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
1011 | } | 1011 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 908dd347c67e..8b0dd6f2a1ac 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -476,7 +476,7 @@ static int centrino_target (struct cpufreq_policy *policy,
476 | saved_mask = current->cpus_allowed; | 476 | saved_mask = current->cpus_allowed; |
477 | first_cpu = 1; | 477 | first_cpu = 1; |
478 | cpus_clear(covered_cpus); | 478 | cpus_clear(covered_cpus); |
479 | for_each_cpu_mask(j, online_policy_cpus) { | 479 | for_each_cpu_mask_nr(j, online_policy_cpus) { |
480 | /* | 480 | /* |
481 | * Support for SMP systems. | 481 | * Support for SMP systems. |
482 | * Make sure we are running on CPU that wants to change freq | 482 | * Make sure we are running on CPU that wants to change freq |
@@ -517,7 +517,7 @@ static int centrino_target (struct cpufreq_policy *policy,
517 | dprintk("target=%dkHz old=%d new=%d msr=%04x\n", | 517 | dprintk("target=%dkHz old=%d new=%d msr=%04x\n", |
518 | target_freq, freqs.old, freqs.new, msr); | 518 | target_freq, freqs.old, freqs.new, msr); |
519 | 519 | ||
520 | for_each_cpu_mask(k, online_policy_cpus) { | 520 | for_each_cpu_mask_nr(k, online_policy_cpus) { |
521 | freqs.cpu = k; | 521 | freqs.cpu = k; |
522 | cpufreq_notify_transition(&freqs, | 522 | cpufreq_notify_transition(&freqs, |
523 | CPUFREQ_PRECHANGE); | 523 | CPUFREQ_PRECHANGE); |
@@ -540,7 +540,7 @@ static int centrino_target (struct cpufreq_policy *policy,
540 | preempt_enable(); | 540 | preempt_enable(); |
541 | } | 541 | } |
542 | 542 | ||
543 | for_each_cpu_mask(k, online_policy_cpus) { | 543 | for_each_cpu_mask_nr(k, online_policy_cpus) { |
544 | freqs.cpu = k; | 544 | freqs.cpu = k; |
545 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 545 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
546 | } | 546 | } |
@@ -554,7 +554,7 @@ static int centrino_target (struct cpufreq_policy *policy,
554 | */ | 554 | */ |
555 | 555 | ||
556 | if (!cpus_empty(covered_cpus)) { | 556 | if (!cpus_empty(covered_cpus)) { |
557 | for_each_cpu_mask(j, covered_cpus) { | 557 | for_each_cpu_mask_nr(j, covered_cpus) { |
558 | set_cpus_allowed_ptr(current, | 558 | set_cpus_allowed_ptr(current, |
559 | &cpumask_of_cpu(j)); | 559 | &cpumask_of_cpu(j)); |
560 | wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); | 560 | wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); |
@@ -564,7 +564,7 @@ static int centrino_target (struct cpufreq_policy *policy,
564 | tmp = freqs.new; | 564 | tmp = freqs.new; |
565 | freqs.new = freqs.old; | 565 | freqs.new = freqs.old; |
566 | freqs.old = tmp; | 566 | freqs.old = tmp; |
567 | for_each_cpu_mask(j, online_policy_cpus) { | 567 | for_each_cpu_mask_nr(j, online_policy_cpus) { |
568 | freqs.cpu = j; | 568 | freqs.cpu = j; |
569 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 569 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
570 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 570 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 1b50244b1fdf..191f7263c61d 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -279,7 +279,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
279 | 279 | ||
280 | cpus_allowed = current->cpus_allowed; | 280 | cpus_allowed = current->cpus_allowed; |
281 | 281 | ||
282 | for_each_cpu_mask(i, policy->cpus) { | 282 | for_each_cpu_mask_nr(i, policy->cpus) { |
283 | freqs.cpu = i; | 283 | freqs.cpu = i; |
284 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 284 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
285 | } | 285 | } |
@@ -292,7 +292,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
292 | /* allow to be run on all CPUs */ | 292 | /* allow to be run on all CPUs */ |
293 | set_cpus_allowed_ptr(current, &cpus_allowed); | 293 | set_cpus_allowed_ptr(current, &cpus_allowed); |
294 | 294 | ||
295 | for_each_cpu_mask(i, policy->cpus) { | 295 | for_each_cpu_mask_nr(i, policy->cpus) { |
296 | freqs.cpu = i; | 296 | freqs.cpu = i; |
297 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 297 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
298 | } | 298 | } |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 26d615dcb149..bfade3301c3a 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -488,7 +488,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
488 | int sibling; | 488 | int sibling; |
489 | 489 | ||
490 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 490 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
491 | for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) { | 491 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { |
492 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); | 492 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); |
493 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); | 493 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); |
494 | } | 494 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 7c9a813e1193..88736cadbaa6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
527 | if (err) | 527 | if (err) |
528 | goto out_free; | 528 | goto out_free; |
529 | 529 | ||
530 | for_each_cpu_mask(i, b->cpus) { | 530 | for_each_cpu_mask_nr(i, b->cpus) { |
531 | if (i == cpu) | 531 | if (i == cpu) |
532 | continue; | 532 | continue; |
533 | 533 | ||
@@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
617 | #endif | 617 | #endif |
618 | 618 | ||
619 | /* remove all sibling symlinks before unregistering */ | 619 | /* remove all sibling symlinks before unregistering */ |
620 | for_each_cpu_mask(i, b->cpus) { | 620 | for_each_cpu_mask_nr(i, b->cpus) { |
621 | if (i == cpu) | 621 | if (i == cpu) |
622 | continue; | 622 | continue; |
623 | 623 | ||
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index ef1a8dfcc529..e2838cbd2ff8 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -718,7 +718,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
718 | return 0; | 718 | return 0; |
719 | } | 719 | } |
720 | 720 | ||
721 | for_each_cpu_mask(cpu, mask) { | 721 | for_each_cpu_mask_nr(cpu, mask) { |
722 | cpumask_t domain, new_mask; | 722 | cpumask_t domain, new_mask; |
723 | int new_cpu; | 723 | int new_cpu; |
724 | int vector, offset; | 724 | int vector, offset; |
@@ -739,7 +739,7 @@ next:
739 | continue; | 739 | continue; |
740 | if (vector == IA32_SYSCALL_VECTOR) | 740 | if (vector == IA32_SYSCALL_VECTOR) |
741 | goto next; | 741 | goto next; |
742 | for_each_cpu_mask(new_cpu, new_mask) | 742 | for_each_cpu_mask_nr(new_cpu, new_mask) |
743 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) | 743 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) |
744 | goto next; | 744 | goto next; |
745 | /* Found one! */ | 745 | /* Found one! */ |
@@ -749,7 +749,7 @@ next:
749 | cfg->move_in_progress = 1; | 749 | cfg->move_in_progress = 1; |
750 | cfg->old_domain = cfg->domain; | 750 | cfg->old_domain = cfg->domain; |
751 | } | 751 | } |
752 | for_each_cpu_mask(new_cpu, new_mask) | 752 | for_each_cpu_mask_nr(new_cpu, new_mask) |
753 | per_cpu(vector_irq, new_cpu)[vector] = irq; | 753 | per_cpu(vector_irq, new_cpu)[vector] = irq; |
754 | cfg->vector = vector; | 754 | cfg->vector = vector; |
755 | cfg->domain = domain; | 755 | cfg->domain = domain; |
@@ -781,7 +781,7 @@ static void __clear_irq_vector(int irq)
781 | 781 | ||
782 | vector = cfg->vector; | 782 | vector = cfg->vector; |
783 | cpus_and(mask, cfg->domain, cpu_online_map); | 783 | cpus_and(mask, cfg->domain, cpu_online_map); |
784 | for_each_cpu_mask(cpu, mask) | 784 | for_each_cpu_mask_nr(cpu, mask) |
785 | per_cpu(vector_irq, cpu)[vector] = -1; | 785 | per_cpu(vector_irq, cpu)[vector] = -1; |
786 | 786 | ||
787 | cfg->vector = 0; | 787 | cfg->vector = 0; |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 3e1cecedde42..197300bfebd2 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -487,7 +487,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
487 | cpu_set(cpu, cpu_sibling_setup_map); | 487 | cpu_set(cpu, cpu_sibling_setup_map); |
488 | 488 | ||
489 | if (smp_num_siblings > 1) { | 489 | if (smp_num_siblings > 1) { |
490 | for_each_cpu_mask(i, cpu_sibling_setup_map) { | 490 | for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { |
491 | if (c->phys_proc_id == cpu_data(i).phys_proc_id && | 491 | if (c->phys_proc_id == cpu_data(i).phys_proc_id && |
492 | c->cpu_core_id == cpu_data(i).cpu_core_id) { | 492 | c->cpu_core_id == cpu_data(i).cpu_core_id) { |
493 | cpu_set(i, per_cpu(cpu_sibling_map, cpu)); | 493 | cpu_set(i, per_cpu(cpu_sibling_map, cpu)); |
@@ -510,7 +510,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
510 | return; | 510 | return; |
511 | } | 511 | } |
512 | 512 | ||
513 | for_each_cpu_mask(i, cpu_sibling_setup_map) { | 513 | for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { |
514 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && | 514 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && |
515 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | 515 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { |
516 | cpu_set(i, c->llc_shared_map); | 516 | cpu_set(i, c->llc_shared_map); |
@@ -1300,7 +1300,7 @@ static void remove_siblinginfo(int cpu)
1300 | int sibling; | 1300 | int sibling; |
1301 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 1301 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
1302 | 1302 | ||
1303 | for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { | 1303 | for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) { |
1304 | cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); | 1304 | cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); |
1305 | /*/ | 1305 | /*/ |
1306 | * last thread sibling in this cpu core going down | 1306 | * last thread sibling in this cpu core going down |
@@ -1309,7 +1309,7 @@ static void remove_siblinginfo(int cpu)
1309 | cpu_data(sibling).booted_cores--; | 1309 | cpu_data(sibling).booted_cores--; |
1310 | } | 1310 | } |
1311 | 1311 | ||
1312 | for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) | 1312 | for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu)) |
1313 | cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); | 1313 | cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); |
1314 | cpus_clear(per_cpu(cpu_sibling_map, cpu)); | 1314 | cpus_clear(per_cpu(cpu_sibling_map, cpu)); |
1315 | cpus_clear(per_cpu(cpu_core_map, cpu)); | 1315 | cpus_clear(per_cpu(cpu_core_map, cpu)); |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 94e69000f982..7a70638797ed 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -345,7 +345,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
345 | 345 | ||
346 | cpus_and(mask, mask, cpu_online_map); | 346 | cpus_and(mask, mask, cpu_online_map); |
347 | 347 | ||
348 | for_each_cpu_mask(cpu, mask) | 348 | for_each_cpu_mask_nr(cpu, mask) |
349 | xen_send_IPI_one(cpu, vector); | 349 | xen_send_IPI_one(cpu, vector); |
350 | } | 350 | } |
351 | 351 | ||
@@ -413,7 +413,7 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
413 | 413 | ||
414 | /* Make sure other vcpus get a chance to run if they need to. */ | 414 | /* Make sure other vcpus get a chance to run if they need to. */ |
415 | yield = false; | 415 | yield = false; |
416 | for_each_cpu_mask(cpu, mask) | 416 | for_each_cpu_mask_nr(cpu, mask) |
417 | if (xen_vcpu_stolen(cpu)) | 417 | if (xen_vcpu_stolen(cpu)) |
418 | yield = true; | 418 | yield = true; |
419 | 419 | ||
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index bb06738860c4..28509fbba6f9 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -1013,7 +1013,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1013 | * affected cpu in order to get one proper T-state. | 1013 | * affected cpu in order to get one proper T-state. |
1014 | * The notifier event is THROTTLING_PRECHANGE. | 1014 | * The notifier event is THROTTLING_PRECHANGE. |
1015 | */ | 1015 | */ |
1016 | for_each_cpu_mask(i, online_throttling_cpus) { | 1016 | for_each_cpu_mask_nr(i, online_throttling_cpus) { |
1017 | t_state.cpu = i; | 1017 | t_state.cpu = i; |
1018 | acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, | 1018 | acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, |
1019 | &t_state); | 1019 | &t_state); |
@@ -1034,7 +1034,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1034 | * it is necessary to set T-state for every affected | 1034 | * it is necessary to set T-state for every affected |
1035 | * cpus. | 1035 | * cpus. |
1036 | */ | 1036 | */ |
1037 | for_each_cpu_mask(i, online_throttling_cpus) { | 1037 | for_each_cpu_mask_nr(i, online_throttling_cpus) { |
1038 | match_pr = processors[i]; | 1038 | match_pr = processors[i]; |
1039 | /* | 1039 | /* |
1040 | * If the pointer is invalid, we will report the | 1040 | * If the pointer is invalid, we will report the |
@@ -1068,7 +1068,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1068 | * affected cpu to update the T-states. | 1068 | * affected cpu to update the T-states. |
1069 | * The notifier event is THROTTLING_POSTCHANGE | 1069 | * The notifier event is THROTTLING_POSTCHANGE |
1070 | */ | 1070 | */ |
1071 | for_each_cpu_mask(i, online_throttling_cpus) { | 1071 | for_each_cpu_mask_nr(i, online_throttling_cpus) { |
1072 | t_state.cpu = i; | 1072 | t_state.cpu = i; |
1073 | acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, | 1073 | acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, |
1074 | &t_state); | 1074 | &t_state); |
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e38dfed41d80..5000402ae092 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -119,14 +119,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
119 | { \ | 119 | { \ |
120 | return print_cpus_map(buf, &cpu_##type##_map); \ | 120 | return print_cpus_map(buf, &cpu_##type##_map); \ |
121 | } \ | 121 | } \ |
122 | struct sysdev_class_attribute attr_##type##_map = \ | 122 | static struct sysdev_class_attribute attr_##type##_map = \ |
123 | _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL) | 123 | _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL) |
124 | 124 | ||
125 | print_cpus_func(online); | 125 | print_cpus_func(online); |
126 | print_cpus_func(possible); | 126 | print_cpus_func(possible); |
127 | print_cpus_func(present); | 127 | print_cpus_func(present); |
128 | 128 | ||
129 | struct sysdev_class_attribute *cpu_state_attr[] = { | 129 | static struct sysdev_class_attribute *cpu_state_attr[] = { |
130 | &attr_online_map, | 130 | &attr_online_map, |
131 | &attr_possible_map, | 131 | &attr_possible_map, |
132 | &attr_present_map, | 132 | &attr_present_map, |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1d41496ed2f8..d8f8b1e0edde 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
589 | ssize_t i = 0; | 589 | ssize_t i = 0; |
590 | unsigned int cpu; | 590 | unsigned int cpu; |
591 | 591 | ||
592 | for_each_cpu_mask(cpu, mask) { | 592 | for_each_cpu_mask_nr(cpu, mask) { |
593 | if (i) | 593 | if (i) |
594 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); | 594 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); |
595 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); | 595 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); |
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
835 | } | 835 | } |
836 | #endif | 836 | #endif |
837 | 837 | ||
838 | for_each_cpu_mask(j, policy->cpus) { | 838 | for_each_cpu_mask_nr(j, policy->cpus) { |
839 | if (cpu == j) | 839 | if (cpu == j) |
840 | continue; | 840 | continue; |
841 | 841 | ||
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
898 | } | 898 | } |
899 | 899 | ||
900 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 900 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
901 | for_each_cpu_mask(j, policy->cpus) { | 901 | for_each_cpu_mask_nr(j, policy->cpus) { |
902 | cpufreq_cpu_data[j] = policy; | 902 | cpufreq_cpu_data[j] = policy; |
903 | per_cpu(policy_cpu, j) = policy->cpu; | 903 | per_cpu(policy_cpu, j) = policy->cpu; |
904 | } | 904 | } |
905 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 905 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
906 | 906 | ||
907 | /* symlink affected CPUs */ | 907 | /* symlink affected CPUs */ |
908 | for_each_cpu_mask(j, policy->cpus) { | 908 | for_each_cpu_mask_nr(j, policy->cpus) { |
909 | if (j == cpu) | 909 | if (j == cpu) |
910 | continue; | 910 | continue; |
911 | if (!cpu_online(j)) | 911 | if (!cpu_online(j)) |
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
945 | 945 | ||
946 | err_out_unregister: | 946 | err_out_unregister: |
947 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 947 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
948 | for_each_cpu_mask(j, policy->cpus) | 948 | for_each_cpu_mask_nr(j, policy->cpus) |
949 | cpufreq_cpu_data[j] = NULL; | 949 | cpufreq_cpu_data[j] = NULL; |
950 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 950 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
951 | 951 | ||
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1028 | * links afterwards. | 1028 | * links afterwards. |
1029 | */ | 1029 | */ |
1030 | if (unlikely(cpus_weight(data->cpus) > 1)) { | 1030 | if (unlikely(cpus_weight(data->cpus) > 1)) { |
1031 | for_each_cpu_mask(j, data->cpus) { | 1031 | for_each_cpu_mask_nr(j, data->cpus) { |
1032 | if (j == cpu) | 1032 | if (j == cpu) |
1033 | continue; | 1033 | continue; |
1034 | cpufreq_cpu_data[j] = NULL; | 1034 | cpufreq_cpu_data[j] = NULL; |
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1038 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1038 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1039 | 1039 | ||
1040 | if (unlikely(cpus_weight(data->cpus) > 1)) { | 1040 | if (unlikely(cpus_weight(data->cpus) > 1)) { |
1041 | for_each_cpu_mask(j, data->cpus) { | 1041 | for_each_cpu_mask_nr(j, data->cpus) { |
1042 | if (j == cpu) | 1042 | if (j == cpu) |
1043 | continue; | 1043 | continue; |
1044 | dprintk("removing link for cpu %u\n", j); | 1044 | dprintk("removing link for cpu %u\n", j); |
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5d3a04ba6ad2..fe565ee43757 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
497 | return rc; | 497 | return rc; |
498 | } | 498 | } |
499 | 499 | ||
500 | for_each_cpu_mask(j, policy->cpus) { | 500 | for_each_cpu_mask_nr(j, policy->cpus) { |
501 | struct cpu_dbs_info_s *j_dbs_info; | 501 | struct cpu_dbs_info_s *j_dbs_info; |
502 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 502 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
503 | j_dbs_info->cur_policy = policy; | 503 | j_dbs_info->cur_policy = policy; |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d2af20dda382..33855cb3cf16 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
367 | 367 | ||
368 | /* Get Idle Time */ | 368 | /* Get Idle Time */ |
369 | idle_ticks = UINT_MAX; | 369 | idle_ticks = UINT_MAX; |
370 | for_each_cpu_mask(j, policy->cpus) { | 370 | for_each_cpu_mask_nr(j, policy->cpus) { |
371 | cputime64_t total_idle_ticks; | 371 | cputime64_t total_idle_ticks; |
372 | unsigned int tmp_idle_ticks; | 372 | unsigned int tmp_idle_ticks; |
373 | struct cpu_dbs_info_s *j_dbs_info; | 373 | struct cpu_dbs_info_s *j_dbs_info; |
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
521 | return rc; | 521 | return rc; |
522 | } | 522 | } |
523 | 523 | ||
524 | for_each_cpu_mask(j, policy->cpus) { | 524 | for_each_cpu_mask_nr(j, policy->cpus) { |
525 | struct cpu_dbs_info_s *j_dbs_info; | 525 | struct cpu_dbs_info_s *j_dbs_info; |
526 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 526 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
527 | j_dbs_info->cur_policy = policy; | 527 | j_dbs_info->cur_policy = policy; |
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index ce1ab0571be3..43180b952c1f 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -641,8 +641,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
641 | ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); | 641 | ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); |
642 | 642 | ||
643 | spin_lock_irqsave(&pool->last_cpu_lock, flags); | 643 | spin_lock_irqsave(&pool->last_cpu_lock, flags); |
644 | cpu = next_cpu(pool->last_cpu, cpu_online_map); | 644 | cpu = next_cpu_nr(pool->last_cpu, cpu_online_map); |
645 | if (cpu == NR_CPUS) | 645 | if (cpu >= nr_cpu_ids) |
646 | cpu = first_cpu(cpu_online_map); | 646 | cpu = first_cpu(cpu_online_map); |
647 | pool->last_cpu = cpu; | 647 | pool->last_cpu = cpu; |
648 | spin_unlock_irqrestore(&pool->last_cpu_lock, flags); | 648 | spin_unlock_irqrestore(&pool->last_cpu_lock, flags); |
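The ehca hunk above is the one place in the series where the exhaustion test itself changes: next_cpu_nr() reports "no more set bits" by returning a value >= nr_cpu_ids rather than NR_CPUS. A minimal sketch of that round-robin wrap-around pattern, with the locking dropped and an illustrative function name that is not from the patch, assuming the patched <linux/cpumask.h>:

#include <linux/cpumask.h>

/* Pick the online CPU after last_cpu, wrapping back to the lowest one. */
static int pick_next_online_cpu(int last_cpu)
{
	int cpu = next_cpu_nr(last_cpu, cpu_online_map);	/* scans only up to nr_cpu_ids */

	if (cpu >= nr_cpu_ids)					/* ran past the last possible CPU */
		cpu = first_cpu(cpu_online_map);
	return cpu;
}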
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index ecc80f341f37..5f7310aa3efd 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -121,7 +121,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
121 | * - mbligh | 121 | * - mbligh |
122 | */ | 122 | */ |
123 | local_irq_save(flags); | 123 | local_irq_save(flags); |
124 | for_each_cpu_mask(query_cpu, mask) { | 124 | for_each_cpu_mask_nr(query_cpu, mask) { |
125 | __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), | 125 | __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), |
126 | vector, APIC_DEST_PHYSICAL); | 126 | vector, APIC_DEST_PHYSICAL); |
127 | } | 127 | } |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c24875bd9c5b..47418b1b4103 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -17,6 +17,20 @@
17 | * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. | 17 | * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. |
18 | * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. | 18 | * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. |
19 | * | 19 | * |
20 | * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | ||
21 | * Note: The alternate operations with the suffix "_nr" are used | ||
22 | * to limit the range of the loop to nr_cpu_ids instead of | ||
23 | * NR_CPUS when NR_CPUS > 64 for performance reasons. | ||
24 | * If NR_CPUS is <= 64 then most assembler bitmask | ||
25 | * operators execute faster with a constant range, so | ||
26 | * the operator will continue to use NR_CPUS. | ||
27 | * | ||
28 | * Another consideration is that nr_cpu_ids is initialized | ||
29 | * to NR_CPUS and isn't lowered until the possible cpus are | ||
30 | * discovered (including any disabled cpus). So early uses | ||
31 | * will span the entire range of NR_CPUS. | ||
32 | * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | ||
33 | * | ||
20 | * The available cpumask operations are: | 34 | * The available cpumask operations are: |
21 | * | 35 | * |
22 | * void cpu_set(cpu, mask) turn on bit 'cpu' in mask | 36 | * void cpu_set(cpu, mask) turn on bit 'cpu' in mask |
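The note added above captures the point of the whole series: with NR_CPUS configured at, say, 4096 but only a handful of CPUs actually possible, bounding the scan by nr_cpu_ids skips thousands of bits that can never be set. A small usage sketch under that assumption (the function is illustrative, not part of the patch):

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void report_online_cpus(void)
{
	int cpu, n = 0;

	/* iterates bits [0, nr_cpu_ids) instead of [0, NR_CPUS) */
	for_each_cpu_mask_nr(cpu, cpu_online_map)
		n++;

	printk(KERN_DEBUG "%d online, nr_cpu_ids=%d, NR_CPUS=%d\n",
	       n, nr_cpu_ids, NR_CPUS);
}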
@@ -38,12 +52,14 @@
38 | * int cpus_empty(mask) Is mask empty (no bits sets)? | 52 | * int cpus_empty(mask) Is mask empty (no bits sets)? |
39 | * int cpus_full(mask) Is mask full (all bits sets)? | 53 | * int cpus_full(mask) Is mask full (all bits sets)? |
40 | * int cpus_weight(mask) Hamming weigh - number of set bits | 54 | * int cpus_weight(mask) Hamming weigh - number of set bits |
55 | * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS | ||
41 | * | 56 | * |
42 | * void cpus_shift_right(dst, src, n) Shift right | 57 | * void cpus_shift_right(dst, src, n) Shift right |
43 | * void cpus_shift_left(dst, src, n) Shift left | 58 | * void cpus_shift_left(dst, src, n) Shift left |
44 | * | 59 | * |
45 | * int first_cpu(mask) Number lowest set bit, or NR_CPUS | 60 | * int first_cpu(mask) Number lowest set bit, or NR_CPUS |
46 | * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS | 61 | * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS |
62 | * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids | ||
47 | * | 63 | * |
48 | * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set | 64 | * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set |
49 | * CPU_MASK_ALL Initializer - all bits set | 65 | * CPU_MASK_ALL Initializer - all bits set |
@@ -59,7 +75,8 @@
59 | * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap | 75 | * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap |
60 | * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz | 76 | * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz |
61 | * | 77 | * |
62 | * for_each_cpu_mask(cpu, mask) for-loop cpu over mask | 78 | * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS |
79 | * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids | ||
63 | * | 80 | * |
64 | * int num_online_cpus() Number of online CPUs | 81 | * int num_online_cpus() Number of online CPUs |
65 | * int num_possible_cpus() Number of all possible CPUs | 82 | * int num_possible_cpus() Number of all possible CPUs |
@@ -216,15 +233,6 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
216 | bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); | 233 | bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); |
217 | } | 234 | } |
218 | 235 | ||
219 | #ifdef CONFIG_SMP | ||
220 | int __first_cpu(const cpumask_t *srcp); | ||
221 | #define first_cpu(src) __first_cpu(&(src)) | ||
222 | int __next_cpu(int n, const cpumask_t *srcp); | ||
223 | #define next_cpu(n, src) __next_cpu((n), &(src)) | ||
224 | #else | ||
225 | #define first_cpu(src) ({ (void)(src); 0; }) | ||
226 | #define next_cpu(n, src) ({ (void)(src); 1; }) | ||
227 | #endif | ||
228 | 236 | ||
229 | #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP | 237 | #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP |
230 | extern cpumask_t *cpumask_of_cpu_map; | 238 | extern cpumask_t *cpumask_of_cpu_map; |
@@ -343,15 +351,48 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
343 | bitmap_fold(dstp->bits, origp->bits, sz, nbits); | 351 | bitmap_fold(dstp->bits, origp->bits, sz, nbits); |
344 | } | 352 | } |
345 | 353 | ||
346 | #if NR_CPUS > 1 | 354 | #if NR_CPUS == 1 |
347 | #define for_each_cpu_mask(cpu, mask) \ | 355 | |
348 | for ((cpu) = first_cpu(mask); \ | 356 | #define nr_cpu_ids 1 |
349 | (cpu) < NR_CPUS; \ | 357 | #define first_cpu(src) ({ (void)(src); 0; }) |
350 | (cpu) = next_cpu((cpu), (mask))) | 358 | #define next_cpu(n, src) ({ (void)(src); 1; }) |
351 | #else /* NR_CPUS == 1 */ | 359 | #define any_online_cpu(mask) 0 |
352 | #define for_each_cpu_mask(cpu, mask) \ | 360 | #define for_each_cpu_mask(cpu, mask) \ |
353 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) | 361 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
354 | #endif /* NR_CPUS */ | 362 | |
363 | #else /* NR_CPUS > 1 */ | ||
364 | |||
365 | extern int nr_cpu_ids; | ||
366 | int __first_cpu(const cpumask_t *srcp); | ||
367 | int __next_cpu(int n, const cpumask_t *srcp); | ||
368 | int __any_online_cpu(const cpumask_t *mask); | ||
369 | |||
370 | #define first_cpu(src) __first_cpu(&(src)) | ||
371 | #define next_cpu(n, src) __next_cpu((n), &(src)) | ||
372 | #define any_online_cpu(mask) __any_online_cpu(&(mask)) | ||
373 | #define for_each_cpu_mask(cpu, mask) \ | ||
374 | for ((cpu) = -1; \ | ||
375 | (cpu) = next_cpu((cpu), (mask)), \ | ||
376 | (cpu) < NR_CPUS; ) | ||
377 | #endif | ||
378 | |||
379 | #if NR_CPUS <= 64 | ||
380 | |||
381 | #define next_cpu_nr(n, src) next_cpu(n, src) | ||
382 | #define cpus_weight_nr(cpumask) cpus_weight(cpumask) | ||
383 | #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) | ||
384 | |||
385 | #else /* NR_CPUS > 64 */ | ||
386 | |||
387 | int __next_cpu_nr(int n, const cpumask_t *srcp); | ||
388 | #define next_cpu_nr(n, src) __next_cpu_nr((n), &(src)) | ||
389 | #define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids) | ||
390 | #define for_each_cpu_mask_nr(cpu, mask) \ | ||
391 | for ((cpu) = -1; \ | ||
392 | (cpu) = next_cpu_nr((cpu), (mask)), \ | ||
393 | (cpu) < nr_cpu_ids; ) | ||
394 | |||
395 | #endif /* NR_CPUS > 64 */ | ||
355 | 396 | ||
356 | #define next_cpu_nr(n, src) next_cpu(n, src) | 397 | #define next_cpu_nr(n, src) next_cpu(n, src) |
357 | #define cpus_weight_nr(cpumask) cpus_weight(cpumask) | 398 | #define cpus_weight_nr(cpumask) cpus_weight(cpumask) |
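For completeness: after the split above, the _nr helpers are plain aliases on NR_CPUS <= 64 builds and only diverge when NR_CPUS > 64. Since no possible CPU can sit above nr_cpu_ids, both forms still agree on the result; a throwaway sanity check along those lines (illustrative only, not part of the patch):

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void check_weight_helpers(void)
{
	int w = cpus_weight(cpu_possible_map);		/* scans NR_CPUS bits */
	int w_nr = cpus_weight_nr(cpu_possible_map);	/* stops at nr_cpu_ids */

	WARN_ON(w != w_nr);
	printk(KERN_INFO "possible cpus: %d (nr_cpu_ids=%d)\n", w_nr, nr_cpu_ids);
}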
@@ -418,9 +459,9 @@ extern cpumask_t cpu_online_map;
418 | extern cpumask_t cpu_present_map; | 459 | extern cpumask_t cpu_present_map; |
419 | 460 | ||
420 | #if NR_CPUS > 1 | 461 | #if NR_CPUS > 1 |
421 | #define num_online_cpus() cpus_weight(cpu_online_map) | 462 | #define num_online_cpus() cpus_weight_nr(cpu_online_map) |
422 | #define num_possible_cpus() cpus_weight(cpu_possible_map) | 463 | #define num_possible_cpus() cpus_weight_nr(cpu_possible_map) |
423 | #define num_present_cpus() cpus_weight(cpu_present_map) | 464 | #define num_present_cpus() cpus_weight_nr(cpu_present_map) |
424 | #define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) | 465 | #define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) |
425 | #define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) | 466 | #define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) |
426 | #define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) | 467 | #define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) |
@@ -435,17 +476,8 @@ extern cpumask_t cpu_present_map;
435 | 476 | ||
436 | #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) | 477 | #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) |
437 | 478 | ||
438 | #ifdef CONFIG_SMP | 479 | #define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map) |
439 | extern int nr_cpu_ids; | 480 | #define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) |
440 | #define any_online_cpu(mask) __any_online_cpu(&(mask)) | 481 | #define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) |
441 | int __any_online_cpu(const cpumask_t *mask); | ||
442 | #else | ||
443 | #define nr_cpu_ids 1 | ||
444 | #define any_online_cpu(mask) 0 | ||
445 | #endif | ||
446 | |||
447 | #define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) | ||
448 | #define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) | ||
449 | #define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) | ||
450 | 482 | ||
451 | #endif /* __LINUX_CPUMASK_H */ | 483 | #endif /* __LINUX_CPUMASK_H */ |
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c77bc3a1c722..50ae922c6022 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -390,7 +390,7 @@ void __ref enable_nonboot_cpus(void)
390 | goto out; | 390 | goto out; |
391 | 391 | ||
392 | printk("Enabling non-boot CPUs ...\n"); | 392 | printk("Enabling non-boot CPUs ...\n"); |
393 | for_each_cpu_mask(cpu, frozen_cpus) { | 393 | for_each_cpu_mask_nr(cpu, frozen_cpus) { |
394 | error = _cpu_up(cpu, 1); | 394 | error = _cpu_up(cpu, 1); |
395 | if (!error) { | 395 | if (!error) { |
396 | printk("CPU%d is up\n", cpu); | 396 | printk("CPU%d is up\n", cpu); |
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index a38895a5b8e2..adde10388d0c 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
106 | */ | 106 | */ |
107 | cpus_and(cpumask, rcp->cpumask, cpu_online_map); | 107 | cpus_and(cpumask, rcp->cpumask, cpu_online_map); |
108 | cpu_clear(rdp->cpu, cpumask); | 108 | cpu_clear(rdp->cpu, cpumask); |
109 | for_each_cpu_mask(cpu, cpumask) | 109 | for_each_cpu_mask_nr(cpu, cpumask) |
110 | smp_send_reschedule(cpu); | 110 | smp_send_reschedule(cpu); |
111 | } | 111 | } |
112 | } | 112 | } |
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5e02b7740702..5cbd69edf5d9 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -655,7 +655,7 @@ rcu_try_flip_idle(void)
655 | 655 | ||
656 | /* Now ask each CPU for acknowledgement of the flip. */ | 656 | /* Now ask each CPU for acknowledgement of the flip. */ |
657 | 657 | ||
658 | for_each_cpu_mask(cpu, rcu_cpu_online_map) { | 658 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { |
659 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; | 659 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; |
660 | dyntick_save_progress_counter(cpu); | 660 | dyntick_save_progress_counter(cpu); |
661 | } | 661 | } |
@@ -673,7 +673,7 @@ rcu_try_flip_waitack(void)
673 | int cpu; | 673 | int cpu; |
674 | 674 | ||
675 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); | 675 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); |
676 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 676 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
677 | if (rcu_try_flip_waitack_needed(cpu) && | 677 | if (rcu_try_flip_waitack_needed(cpu) && |
678 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { | 678 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { |
679 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); | 679 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); |
@@ -705,7 +705,7 @@ rcu_try_flip_waitzero(void)
705 | /* Check to see if the sum of the "last" counters is zero. */ | 705 | /* Check to see if the sum of the "last" counters is zero. */ |
706 | 706 | ||
707 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); | 707 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); |
708 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 708 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
709 | sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; | 709 | sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; |
710 | if (sum != 0) { | 710 | if (sum != 0) { |
711 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); | 711 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); |
@@ -720,7 +720,7 @@ rcu_try_flip_waitzero(void)
720 | smp_mb(); /* ^^^^^^^^^^^^ */ | 720 | smp_mb(); /* ^^^^^^^^^^^^ */ |
721 | 721 | ||
722 | /* Call for a memory barrier from each CPU. */ | 722 | /* Call for a memory barrier from each CPU. */ |
723 | for_each_cpu_mask(cpu, rcu_cpu_online_map) { | 723 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { |
724 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; | 724 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; |
725 | dyntick_save_progress_counter(cpu); | 725 | dyntick_save_progress_counter(cpu); |
726 | } | 726 | } |
@@ -740,7 +740,7 @@ rcu_try_flip_waitmb(void)
740 | int cpu; | 740 | int cpu; |
741 | 741 | ||
742 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); | 742 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); |
743 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 743 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
744 | if (rcu_try_flip_waitmb_needed(cpu) && | 744 | if (rcu_try_flip_waitmb_needed(cpu) && |
745 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { | 745 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { |
746 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); | 746 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); |
diff --git a/kernel/sched.c b/kernel/sched.c
index 94ead43eda62..e6795e39c8ab 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1939,7 +1939,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
1939 | /* Tally up the load of all CPUs in the group */ | 1939 | /* Tally up the load of all CPUs in the group */ |
1940 | avg_load = 0; | 1940 | avg_load = 0; |
1941 | 1941 | ||
1942 | for_each_cpu_mask(i, group->cpumask) { | 1942 | for_each_cpu_mask_nr(i, group->cpumask) { |
1943 | /* Bias balancing toward cpus of our domain */ | 1943 | /* Bias balancing toward cpus of our domain */ |
1944 | if (local_group) | 1944 | if (local_group) |
1945 | load = source_load(i, load_idx); | 1945 | load = source_load(i, load_idx); |
@@ -1981,7 +1981,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
1981 | /* Traverse only the allowed CPUs */ | 1981 | /* Traverse only the allowed CPUs */ |
1982 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); | 1982 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); |
1983 | 1983 | ||
1984 | for_each_cpu_mask(i, *tmp) { | 1984 | for_each_cpu_mask_nr(i, *tmp) { |
1985 | load = weighted_cpuload(i); | 1985 | load = weighted_cpuload(i); |
1986 | 1986 | ||
1987 | if (load < min_load || (load == min_load && i == this_cpu)) { | 1987 | if (load < min_load || (load == min_load && i == this_cpu)) { |
@@ -2964,7 +2964,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2964 | max_cpu_load = 0; | 2964 | max_cpu_load = 0; |
2965 | min_cpu_load = ~0UL; | 2965 | min_cpu_load = ~0UL; |
2966 | 2966 | ||
2967 | for_each_cpu_mask(i, group->cpumask) { | 2967 | for_each_cpu_mask_nr(i, group->cpumask) { |
2968 | struct rq *rq; | 2968 | struct rq *rq; |
2969 | 2969 | ||
2970 | if (!cpu_isset(i, *cpus)) | 2970 | if (!cpu_isset(i, *cpus)) |
@@ -3228,7 +3228,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
3228 | unsigned long max_load = 0; | 3228 | unsigned long max_load = 0; |
3229 | int i; | 3229 | int i; |
3230 | 3230 | ||
3231 | for_each_cpu_mask(i, group->cpumask) { | 3231 | for_each_cpu_mask_nr(i, group->cpumask) { |
3232 | unsigned long wl; | 3232 | unsigned long wl; |
3233 | 3233 | ||
3234 | if (!cpu_isset(i, *cpus)) | 3234 | if (!cpu_isset(i, *cpus)) |
@@ -3759,7 +3759,7 @@ static void run_rebalance_domains(struct softirq_action *h)
3759 | int balance_cpu; | 3759 | int balance_cpu; |
3760 | 3760 | ||
3761 | cpu_clear(this_cpu, cpus); | 3761 | cpu_clear(this_cpu, cpus); |
3762 | for_each_cpu_mask(balance_cpu, cpus) { | 3762 | for_each_cpu_mask_nr(balance_cpu, cpus) { |
3763 | /* | 3763 | /* |
3764 | * If this cpu gets work to do, stop the load balancing | 3764 | * If this cpu gets work to do, stop the load balancing |
3765 | * work being done for other cpus. Next load | 3765 | * work being done for other cpus. Next load |
@@ -6491,7 +6491,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
6491 | 6491 | ||
6492 | cpus_clear(*covered); | 6492 | cpus_clear(*covered); |
6493 | 6493 | ||
6494 | for_each_cpu_mask(i, *span) { | 6494 | for_each_cpu_mask_nr(i, *span) { |
6495 | struct sched_group *sg; | 6495 | struct sched_group *sg; |
6496 | int group = group_fn(i, cpu_map, &sg, tmpmask); | 6496 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
6497 | int j; | 6497 | int j; |
@@ -6502,7 +6502,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
6502 | cpus_clear(sg->cpumask); | 6502 | cpus_clear(sg->cpumask); |
6503 | sg->__cpu_power = 0; | 6503 | sg->__cpu_power = 0; |
6504 | 6504 | ||
6505 | for_each_cpu_mask(j, *span) { | 6505 | for_each_cpu_mask_nr(j, *span) { |
6506 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) | 6506 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
6507 | continue; | 6507 | continue; |
6508 | 6508 | ||
@@ -6538,9 +6538,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
6538 | 6538 | ||
6539 | min_val = INT_MAX; | 6539 | min_val = INT_MAX; |
6540 | 6540 | ||
6541 | for (i = 0; i < MAX_NUMNODES; i++) { | 6541 | for (i = 0; i < nr_node_ids; i++) { |
6542 | /* Start at @node */ | 6542 | /* Start at @node */ |
6543 | n = (node + i) % MAX_NUMNODES; | 6543 | n = (node + i) % nr_node_ids; |
6544 | 6544 | ||
6545 | if (!nr_cpus_node(n)) | 6545 | if (!nr_cpus_node(n)) |
6546 | continue; | 6546 | continue; |
@@ -6702,7 +6702,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
6702 | if (!sg) | 6702 | if (!sg) |
6703 | return; | 6703 | return; |
6704 | do { | 6704 | do { |
6705 | for_each_cpu_mask(j, sg->cpumask) { | 6705 | for_each_cpu_mask_nr(j, sg->cpumask) { |
6706 | struct sched_domain *sd; | 6706 | struct sched_domain *sd; |
6707 | 6707 | ||
6708 | sd = &per_cpu(phys_domains, j); | 6708 | sd = &per_cpu(phys_domains, j); |
@@ -6727,14 +6727,14 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
6727 | { | 6727 | { |
6728 | int cpu, i; | 6728 | int cpu, i; |
6729 | 6729 | ||
6730 | for_each_cpu_mask(cpu, *cpu_map) { | 6730 | for_each_cpu_mask_nr(cpu, *cpu_map) { |
6731 | struct sched_group **sched_group_nodes | 6731 | struct sched_group **sched_group_nodes |
6732 | = sched_group_nodes_bycpu[cpu]; | 6732 | = sched_group_nodes_bycpu[cpu]; |
6733 | 6733 | ||
6734 | if (!sched_group_nodes) | 6734 | if (!sched_group_nodes) |
6735 | continue; | 6735 | continue; |
6736 | 6736 | ||
6737 | for (i = 0; i < MAX_NUMNODES; i++) { | 6737 | for (i = 0; i < nr_node_ids; i++) { |
6738 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | 6738 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
6739 | 6739 | ||
6740 | *nodemask = node_to_cpumask(i); | 6740 | *nodemask = node_to_cpumask(i); |
@@ -6927,7 +6927,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
6927 | /* | 6927 | /* |
6928 | * Allocate the per-node list of sched groups | 6928 | * Allocate the per-node list of sched groups |
6929 | */ | 6929 | */ |
6930 | sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *), | 6930 | sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *), |
6931 | GFP_KERNEL); | 6931 | GFP_KERNEL); |
6932 | if (!sched_group_nodes) { | 6932 | if (!sched_group_nodes) { |
6933 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | 6933 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
@@ -6966,7 +6966,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
6966 | /* | 6966 | /* |
6967 | * Set up domains for cpus specified by the cpu_map. | 6967 | * Set up domains for cpus specified by the cpu_map. |
6968 | */ | 6968 | */ |
6969 | for_each_cpu_mask(i, *cpu_map) { | 6969 | for_each_cpu_mask_nr(i, *cpu_map) { |
6970 | struct sched_domain *sd = NULL, *p; | 6970 | struct sched_domain *sd = NULL, *p; |
6971 | SCHED_CPUMASK_VAR(nodemask, allmasks); | 6971 | SCHED_CPUMASK_VAR(nodemask, allmasks); |
6972 | 6972 | ||
@@ -7033,7 +7033,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7033 | 7033 | ||
7034 | #ifdef CONFIG_SCHED_SMT | 7034 | #ifdef CONFIG_SCHED_SMT |
7035 | /* Set up CPU (sibling) groups */ | 7035 | /* Set up CPU (sibling) groups */ |
7036 | for_each_cpu_mask(i, *cpu_map) { | 7036 | for_each_cpu_mask_nr(i, *cpu_map) { |
7037 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); | 7037 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); |
7038 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7038 | SCHED_CPUMASK_VAR(send_covered, allmasks); |
7039 | 7039 | ||
@@ -7050,7 +7050,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7050 | 7050 | ||
7051 | #ifdef CONFIG_SCHED_MC | 7051 | #ifdef CONFIG_SCHED_MC |
7052 | /* Set up multi-core groups */ | 7052 | /* Set up multi-core groups */ |
7053 | for_each_cpu_mask(i, *cpu_map) { | 7053 | for_each_cpu_mask_nr(i, *cpu_map) { |
7054 | SCHED_CPUMASK_VAR(this_core_map, allmasks); | 7054 | SCHED_CPUMASK_VAR(this_core_map, allmasks); |
7055 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7055 | SCHED_CPUMASK_VAR(send_covered, allmasks); |
7056 | 7056 | ||
@@ -7066,7 +7066,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7066 | #endif | 7066 | #endif |
7067 | 7067 | ||
7068 | /* Set up physical groups */ | 7068 | /* Set up physical groups */ |
7069 | for (i = 0; i < MAX_NUMNODES; i++) { | 7069 | for (i = 0; i < nr_node_ids; i++) { |
7070 | SCHED_CPUMASK_VAR(nodemask, allmasks); | 7070 | SCHED_CPUMASK_VAR(nodemask, allmasks); |
7071 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7071 | SCHED_CPUMASK_VAR(send_covered, allmasks); |
7072 | 7072 | ||
@@ -7090,7 +7090,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7090 | send_covered, tmpmask); | 7090 | send_covered, tmpmask); |
7091 | } | 7091 | } |
7092 | 7092 | ||
7093 | for (i = 0; i < MAX_NUMNODES; i++) { | 7093 | for (i = 0; i < nr_node_ids; i++) { |
7094 | /* Set up node groups */ | 7094 | /* Set up node groups */ |
7095 | struct sched_group *sg, *prev; | 7095 | struct sched_group *sg, *prev; |
7096 | SCHED_CPUMASK_VAR(nodemask, allmasks); | 7096 | SCHED_CPUMASK_VAR(nodemask, allmasks); |
@@ -7117,7 +7117,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7117 | goto error; | 7117 | goto error; |
7118 | } | 7118 | } |
7119 | sched_group_nodes[i] = sg; | 7119 | sched_group_nodes[i] = sg; |
7120 | for_each_cpu_mask(j, *nodemask) { | 7120 | for_each_cpu_mask_nr(j, *nodemask) { |
7121 | struct sched_domain *sd; | 7121 | struct sched_domain *sd; |
7122 | 7122 | ||
7123 | sd = &per_cpu(node_domains, j); | 7123 | sd = &per_cpu(node_domains, j); |
@@ -7129,9 +7129,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7129 | cpus_or(*covered, *covered, *nodemask); | 7129 | cpus_or(*covered, *covered, *nodemask); |
7130 | prev = sg; | 7130 | prev = sg; |
7131 | 7131 | ||
7132 | for (j = 0; j < MAX_NUMNODES; j++) { | 7132 | for (j = 0; j < nr_node_ids; j++) { |
7133 | SCHED_CPUMASK_VAR(notcovered, allmasks); | 7133 | SCHED_CPUMASK_VAR(notcovered, allmasks); |
7134 | int n = (i + j) % MAX_NUMNODES; | 7134 | int n = (i + j) % nr_node_ids; |
7135 | node_to_cpumask_ptr(pnodemask, n); | 7135 | node_to_cpumask_ptr(pnodemask, n); |
7136 | 7136 | ||
7137 | cpus_complement(*notcovered, *covered); | 7137 | cpus_complement(*notcovered, *covered); |
@@ -7163,28 +7163,28 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7163 | 7163 | ||
7164 | /* Calculate CPU power for physical packages and nodes */ | 7164 | /* Calculate CPU power for physical packages and nodes */ |
7165 | #ifdef CONFIG_SCHED_SMT | 7165 | #ifdef CONFIG_SCHED_SMT |
7166 | for_each_cpu_mask(i, *cpu_map) { | 7166 | for_each_cpu_mask_nr(i, *cpu_map) { |
7167 | struct sched_domain *sd = &per_cpu(cpu_domains, i); | 7167 | struct sched_domain *sd = &per_cpu(cpu_domains, i); |
7168 | 7168 | ||
7169 | init_sched_groups_power(i, sd); | 7169 | init_sched_groups_power(i, sd); |
7170 | } | 7170 | } |
7171 | #endif | 7171 | #endif |
7172 | #ifdef CONFIG_SCHED_MC | 7172 | #ifdef CONFIG_SCHED_MC |
7173 | for_each_cpu_mask(i, *cpu_map) { | 7173 | for_each_cpu_mask_nr(i, *cpu_map) { |
7174 | struct sched_domain *sd = &per_cpu(core_domains, i); | 7174 | struct sched_domain *sd = &per_cpu(core_domains, i); |
7175 | 7175 | ||
7176 | init_sched_groups_power(i, sd); | 7176 | init_sched_groups_power(i, sd); |
7177 | } | 7177 | } |
7178 | #endif | 7178 | #endif |
7179 | 7179 | ||
7180 | for_each_cpu_mask(i, *cpu_map) { | 7180 | for_each_cpu_mask_nr(i, *cpu_map) { |
7181 | struct sched_domain *sd = &per_cpu(phys_domains, i); | 7181 | struct sched_domain *sd = &per_cpu(phys_domains, i); |
7182 | 7182 | ||
7183 | init_sched_groups_power(i, sd); | 7183 | init_sched_groups_power(i, sd); |
7184 | } | 7184 | } |
7185 | 7185 | ||
7186 | #ifdef CONFIG_NUMA | 7186 | #ifdef CONFIG_NUMA |
7187 | for (i = 0; i < MAX_NUMNODES; i++) | 7187 | for (i = 0; i < nr_node_ids; i++) |
7188 | init_numa_sched_groups_power(sched_group_nodes[i]); | 7188 | init_numa_sched_groups_power(sched_group_nodes[i]); |
7189 | 7189 | ||
7190 | if (sd_allnodes) { | 7190 | if (sd_allnodes) { |
@@ -7197,7 +7197,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7197 | #endif | 7197 | #endif |
7198 | 7198 | ||
7199 | /* Attach the domains */ | 7199 | /* Attach the domains */ |
7200 | for_each_cpu_mask(i, *cpu_map) { | 7200 | for_each_cpu_mask_nr(i, *cpu_map) { |
7201 | struct sched_domain *sd; | 7201 | struct sched_domain *sd; |
7202 | #ifdef CONFIG_SCHED_SMT | 7202 | #ifdef CONFIG_SCHED_SMT |
7203 | sd = &per_cpu(cpu_domains, i); | 7203 | sd = &per_cpu(cpu_domains, i); |
@@ -7292,7 +7292,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
7292 | 7292 | ||
7293 | unregister_sched_domain_sysctl(); | 7293 | unregister_sched_domain_sysctl(); |
7294 | 7294 | ||
7295 | for_each_cpu_mask(i, *cpu_map) | 7295 | for_each_cpu_mask_nr(i, *cpu_map) |
7296 | cpu_attach_domain(NULL, &def_root_domain, i); | 7296 | cpu_attach_domain(NULL, &def_root_domain, i); |
7297 | synchronize_sched(); | 7297 | synchronize_sched(); |
7298 | arch_destroy_sched_domains(cpu_map, &tmpmask); | 7298 | arch_destroy_sched_domains(cpu_map, &tmpmask); |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 08ae848b71d4..74774bde5264 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -961,7 +961,7 @@ static int wake_idle(int cpu, struct task_struct *p)
961 | || ((sd->flags & SD_WAKE_IDLE_FAR) | 961 | || ((sd->flags & SD_WAKE_IDLE_FAR) |
962 | && !task_hot(p, task_rq(p)->clock, sd))) { | 962 | && !task_hot(p, task_rq(p)->clock, sd))) { |
963 | cpus_and(tmp, sd->span, p->cpus_allowed); | 963 | cpus_and(tmp, sd->span, p->cpus_allowed); |
964 | for_each_cpu_mask(i, tmp) { | 964 | for_each_cpu_mask_nr(i, tmp) { |
965 | if (idle_cpu(i)) { | 965 | if (idle_cpu(i)) { |
966 | if (i != task_cpu(p)) { | 966 | if (i != task_cpu(p)) { |
967 | schedstat_inc(p, | 967 | schedstat_inc(p, |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 0f3c19197fa4..e757f370eb1b 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -231,7 +231,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) | |||
231 | return 1; | 231 | return 1; |
232 | 232 | ||
233 | span = sched_rt_period_mask(); | 233 | span = sched_rt_period_mask(); |
234 | for_each_cpu_mask(i, span) { | 234 | for_each_cpu_mask_nr(i, span) { |
235 | int enqueue = 0; | 235 | int enqueue = 0; |
236 | struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); | 236 | struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); |
237 | struct rq *rq = rq_of_rt_rq(rt_rq); | 237 | struct rq *rq = rq_of_rt_rq(rt_rq); |
@@ -273,7 +273,7 @@ static int balance_runtime(struct rt_rq *rt_rq) | |||
273 | 273 | ||
274 | spin_lock(&rt_b->rt_runtime_lock); | 274 | spin_lock(&rt_b->rt_runtime_lock); |
275 | rt_period = ktime_to_ns(rt_b->rt_period); | 275 | rt_period = ktime_to_ns(rt_b->rt_period); |
276 | for_each_cpu_mask(i, rd->span) { | 276 | for_each_cpu_mask_nr(i, rd->span) { |
277 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); | 277 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); |
278 | s64 diff; | 278 | s64 diff; |
279 | 279 | ||
@@ -1006,7 +1006,7 @@ static int pull_rt_task(struct rq *this_rq) | |||
1006 | 1006 | ||
1007 | next = pick_next_task_rt(this_rq); | 1007 | next = pick_next_task_rt(this_rq); |
1008 | 1008 | ||
1009 | for_each_cpu_mask(cpu, this_rq->rd->rto_mask) { | 1009 | for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) { |
1010 | if (this_cpu == cpu) | 1010 | if (this_cpu == cpu) |
1011 | continue; | 1011 | continue; |
1012 | 1012 | ||
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 4a23517169a6..06b17547f4e7 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) | |||
301 | return -EINVAL; | 301 | return -EINVAL; |
302 | 302 | ||
303 | if (isadd == REGISTER) { | 303 | if (isadd == REGISTER) { |
304 | for_each_cpu_mask(cpu, mask) { | 304 | for_each_cpu_mask_nr(cpu, mask) { |
305 | s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, | 305 | s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, |
306 | cpu_to_node(cpu)); | 306 | cpu_to_node(cpu)); |
307 | if (!s) | 307 | if (!s) |
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) | |||
320 | 320 | ||
321 | /* Deregister or cleanup */ | 321 | /* Deregister or cleanup */ |
322 | cleanup: | 322 | cleanup: |
323 | for_each_cpu_mask(cpu, mask) { | 323 | for_each_cpu_mask_nr(cpu, mask) { |
324 | listeners = &per_cpu(listener_array, cpu); | 324 | listeners = &per_cpu(listener_array, cpu); |
325 | down_write(&listeners->sem); | 325 | down_write(&listeners->sem); |
326 | list_for_each_entry_safe(s, tmp, &listeners->list, list) { | 326 | list_for_each_entry_safe(s, tmp, &listeners->list, list) { |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index dadde5361f32..60ceabd53f2e 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data) | |||
145 | * Cycle through CPUs to check if the CPUs stay | 145 | * Cycle through CPUs to check if the CPUs stay |
146 | * synchronized to each other. | 146 | * synchronized to each other. |
147 | */ | 147 | */ |
148 | int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map); | 148 | int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map); |
149 | 149 | ||
150 | if (next_cpu >= NR_CPUS) | 150 | if (next_cpu >= nr_cpu_ids) |
151 | next_cpu = first_cpu(cpu_online_map); | 151 | next_cpu = first_cpu(cpu_online_map); |
152 | watchdog_timer.expires += WATCHDOG_INTERVAL; | 152 | watchdog_timer.expires += WATCHDOG_INTERVAL; |
153 | add_timer_on(&watchdog_timer, next_cpu); | 153 | add_timer_on(&watchdog_timer, next_cpu); |
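Note that in the clocksource watchdog the bound check changes together with the iterator: the _nr helpers report "no further CPU" by returning nr_cpu_ids (see the min_t(int, nr_cpu_ids, ...) clamp added to lib/cpumask.c below), so the wrap-around test has to compare against nr_cpu_ids as well. With nr_cpu_ids smaller than NR_CPUS, the old ">= NR_CPUS" test would never fire and an out-of-range CPU could reach add_timer_on(). A sketch of the resulting rotation pattern; the function name and interval are illustrative placeholders, not the clocksource names:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/timer.h>

/*
 * Bounce a timer to the next online CPU after the current one,
 * wrapping back to the first online CPU once the scan finds no
 * further bit set below nr_cpu_ids.  Illustrative helper only.
 */
static void rearm_on_next_cpu(struct timer_list *t, unsigned long interval)
{
	int target = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);

	if (target >= nr_cpu_ids)	/* was: >= NR_CPUS */
		target = first_cpu(cpu_online_map);

	t->expires += interval;
	add_timer_on(t, target);
}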
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 57a1f02e5ec0..2d0a96346259 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -397,8 +397,7 @@ again: | |||
397 | mask = CPU_MASK_NONE; | 397 | mask = CPU_MASK_NONE; |
398 | now = ktime_get(); | 398 | now = ktime_get(); |
399 | /* Find all expired events */ | 399 | /* Find all expired events */ |
400 | for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; | 400 | for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) { |
401 | cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) { | ||
402 | td = &per_cpu(tick_cpu_device, cpu); | 401 | td = &per_cpu(tick_cpu_device, cpu); |
403 | if (td->evtdev->next_event.tv64 <= now.tv64) | 402 | if (td->evtdev->next_event.tv64 <= now.tv64) |
404 | cpu_set(cpu, mask); | 403 | cpu_set(cpu, mask); |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ce7799540c91..a6d36346d10a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq) | |||
397 | might_sleep(); | 397 | might_sleep(); |
398 | lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 398 | lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); |
399 | lock_release(&wq->lockdep_map, 1, _THIS_IP_); | 399 | lock_release(&wq->lockdep_map, 1, _THIS_IP_); |
400 | for_each_cpu_mask(cpu, *cpu_map) | 400 | for_each_cpu_mask_nr(cpu, *cpu_map) |
401 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); | 401 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); |
402 | } | 402 | } |
403 | EXPORT_SYMBOL_GPL(flush_workqueue); | 403 | EXPORT_SYMBOL_GPL(flush_workqueue); |
@@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work) | |||
477 | wq = cwq->wq; | 477 | wq = cwq->wq; |
478 | cpu_map = wq_cpu_map(wq); | 478 | cpu_map = wq_cpu_map(wq); |
479 | 479 | ||
480 | for_each_cpu_mask(cpu, *cpu_map) | 480 | for_each_cpu_mask_nr(cpu, *cpu_map) |
481 | wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 481 | wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); |
482 | } | 482 | } |
483 | 483 | ||
@@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
813 | list_del(&wq->list); | 813 | list_del(&wq->list); |
814 | spin_unlock(&workqueue_lock); | 814 | spin_unlock(&workqueue_lock); |
815 | 815 | ||
816 | for_each_cpu_mask(cpu, *cpu_map) | 816 | for_each_cpu_mask_nr(cpu, *cpu_map) |
817 | cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); | 817 | cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); |
818 | put_online_cpus(); | 818 | put_online_cpus(); |
819 | 819 | ||
diff --git a/lib/cpumask.c b/lib/cpumask.c index bb4f76d3c3e7..5f97dc25ef9c 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c | |||
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp) | |||
15 | } | 15 | } |
16 | EXPORT_SYMBOL(__next_cpu); | 16 | EXPORT_SYMBOL(__next_cpu); |
17 | 17 | ||
18 | #if NR_CPUS > 64 | ||
19 | int __next_cpu_nr(int n, const cpumask_t *srcp) | ||
20 | { | ||
21 | return min_t(int, nr_cpu_ids, | ||
22 | find_next_bit(srcp->bits, nr_cpu_ids, n+1)); | ||
23 | } | ||
24 | EXPORT_SYMBOL(__next_cpu_nr); | ||
25 | #endif | ||
26 | |||
18 | int __any_online_cpu(const cpumask_t *mask) | 27 | int __any_online_cpu(const cpumask_t *mask) |
19 | { | 28 | { |
20 | int cpu; | 29 | int cpu; |
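The lib/cpumask.c hunk supplies the out-of-line helper the new iterators rest on, and it is only built when NR_CPUS > 64; for smaller configurations the _nr forms can simply fall back to the existing single-word scanners (the header-side plumbing lives in include/linux/cpumask.h and is not shown in this diff). Its contract is what the callers above rely on: return the index of the first bit set strictly after position n, clamped to nr_cpu_ids when no such bit exists. A sketch of a walk written directly against that contract, assuming an NR_CPUS > 64 build where the helper is compiled in:

#include <linux/cpumask.h>
#include <linux/kernel.h>

/*
 * Visit every CPU set in *mask using only __next_cpu_nr().
 * Starting at -1 makes the first call return the first set bit;
 * the loop ends when the clamp value nr_cpu_ids comes back.
 */
static void walk_mask(const cpumask_t *mask)
{
	int cpu;

	for (cpu = __next_cpu_nr(-1, mask);
	     cpu < nr_cpu_ids;
	     cpu = __next_cpu_nr(cpu, mask))
		printk(KERN_INFO "cpu %d is set\n", cpu);
}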
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c index 05f2b4009ccc..843364594e23 100644 --- a/mm/allocpercpu.c +++ b/mm/allocpercpu.c | |||
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(percpu_depopulate); | |||
35 | void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) | 35 | void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) |
36 | { | 36 | { |
37 | int cpu; | 37 | int cpu; |
38 | for_each_cpu_mask(cpu, *mask) | 38 | for_each_cpu_mask_nr(cpu, *mask) |
39 | percpu_depopulate(__pdata, cpu); | 39 | percpu_depopulate(__pdata, cpu); |
40 | } | 40 | } |
41 | EXPORT_SYMBOL_GPL(__percpu_depopulate_mask); | 41 | EXPORT_SYMBOL_GPL(__percpu_depopulate_mask); |
@@ -86,7 +86,7 @@ int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, | |||
86 | int cpu; | 86 | int cpu; |
87 | 87 | ||
88 | cpus_clear(populated); | 88 | cpus_clear(populated); |
89 | for_each_cpu_mask(cpu, *mask) | 89 | for_each_cpu_mask_nr(cpu, *mask) |
90 | if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { | 90 | if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { |
91 | __percpu_depopulate_mask(__pdata, &populated); | 91 | __percpu_depopulate_mask(__pdata, &populated); |
92 | return -ENOMEM; | 92 | return -ENOMEM; |
diff --git a/mm/vmstat.c b/mm/vmstat.c index db9eabb2c5b3..c3d4a781802f 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -26,7 +26,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask) | |||
26 | 26 | ||
27 | memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long)); | 27 | memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long)); |
28 | 28 | ||
29 | for_each_cpu_mask(cpu, *cpumask) { | 29 | for_each_cpu_mask_nr(cpu, *cpumask) { |
30 | struct vm_event_state *this = &per_cpu(vm_event_states, cpu); | 30 | struct vm_event_state *this = &per_cpu(vm_event_states, cpu); |
31 | 31 | ||
32 | for (i = 0; i < NR_VM_EVENT_ITEMS; i++) | 32 | for (i = 0; i < NR_VM_EVENT_ITEMS; i++) |
diff --git a/net/core/dev.c b/net/core/dev.c index fca23a3bf12c..94d9d6f77e04 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2261,7 +2261,7 @@ out: | |||
2261 | */ | 2261 | */ |
2262 | if (!cpus_empty(net_dma.channel_mask)) { | 2262 | if (!cpus_empty(net_dma.channel_mask)) { |
2263 | int chan_idx; | 2263 | int chan_idx; |
2264 | for_each_cpu_mask(chan_idx, net_dma.channel_mask) { | 2264 | for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) { |
2265 | struct dma_chan *chan = net_dma.channels[chan_idx]; | 2265 | struct dma_chan *chan = net_dma.channels[chan_idx]; |
2266 | if (chan) | 2266 | if (chan) |
2267 | dma_async_memcpy_issue_pending(chan); | 2267 | dma_async_memcpy_issue_pending(chan); |
@@ -4322,7 +4322,7 @@ static void net_dma_rebalance(struct net_dma *net_dma) | |||
4322 | i = 0; | 4322 | i = 0; |
4323 | cpu = first_cpu(cpu_online_map); | 4323 | cpu = first_cpu(cpu_online_map); |
4324 | 4324 | ||
4325 | for_each_cpu_mask(chan_idx, net_dma->channel_mask) { | 4325 | for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) { |
4326 | chan = net_dma->channels[chan_idx]; | 4326 | chan = net_dma->channels[chan_idx]; |
4327 | 4327 | ||
4328 | n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) | 4328 | n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) |
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 918970762131..8de511070593 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -497,7 +497,7 @@ static void iucv_setmask_up(void) | |||
497 | /* Disable all cpu but the first in cpu_irq_cpumask. */ | 497 | /* Disable all cpu but the first in cpu_irq_cpumask. */ |
498 | cpumask = iucv_irq_cpumask; | 498 | cpumask = iucv_irq_cpumask; |
499 | cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); | 499 | cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); |
500 | for_each_cpu_mask(cpu, cpumask) | 500 | for_each_cpu_mask_nr(cpu, cpumask) |
501 | smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1); | 501 | smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1); |
502 | } | 502 | } |
503 | 503 | ||
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 01c7e311b904..d43cf8ddff67 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -603,7 +603,7 @@ __svc_create_thread(svc_thread_fn func, struct svc_serv *serv, | |||
603 | error = kernel_thread((int (*)(void *)) func, rqstp, 0); | 603 | error = kernel_thread((int (*)(void *)) func, rqstp, 0); |
604 | 604 | ||
605 | if (have_oldmask) | 605 | if (have_oldmask) |
606 | set_cpus_allowed(current, oldmask); | 606 | set_cpus_allowed_ptr(current, &oldmask); |
607 | 607 | ||
608 | if (error < 0) | 608 | if (error < 0) |
609 | goto out_thread; | 609 | goto out_thread; |
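The sunrpc change is the one hunk in this batch about argument passing rather than mask iteration: set_cpus_allowed() takes the cpumask_t by value, so every call copies the full mask (NR_CPUS bits, i.e. 512 bytes at NR_CPUS=4096), while set_cpus_allowed_ptr() takes a pointer and copies nothing. A sketch of the usual pin-and-restore pattern using the by-pointer call; run_pinned_on() is an illustrative helper, not kernel code, and error handling is omitted:

#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Pin the current task to one CPU, run fn(), then restore the saved
 * affinity.  Both affinity updates go through the pointer-taking API,
 * so no cpumask_t is passed by value.
 */
static void run_pinned_on(int cpu, void (*fn)(void))
{
	cpumask_t saved = current->cpus_allowed;
	cpumask_t one;

	cpus_clear(one);
	cpu_set(cpu, one);

	set_cpus_allowed_ptr(current, &one);
	fn();
	set_cpus_allowed_ptr(current, &saved);
}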