Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c        |  6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c         |  6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c         |  8
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c  | 10
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c       |  4
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c             |  2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c           |  4
-rw-r--r--  arch/x86/kernel/io_apic_64.c                      |  8
-rw-r--r--  arch/x86/kernel/smpboot.c                         |  8
-rw-r--r--  arch/x86/xen/smp.c                                |  4
10 files changed, 30 insertions(+), 30 deletions(-)
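
Every hunk in this patch is the same mechanical substitution: the for_each_cpu_mask() iterator is replaced by for_each_cpu_mask_nr(). The difference is the loop bound, not which CPUs are visited: the _nr variant stops at nr_cpu_ids, the number of possible CPUs fixed at boot, instead of scanning up to the compile-time NR_CPUS maximum. On kernels built with a large NR_CPUS (e.g. 4096) running on small machines, this avoids walking the long, guaranteed-empty tail of each cpumask. The definitions below are a simplified sketch modeled on the 2.6.26-era include/linux/cpumask.h to show the shape of the change; they are not verbatim kernel source.

/*
 * Simplified sketch (assumption: modeled on 2.6.26-era
 * include/linux/cpumask.h; the exact kernel definitions differ in
 * detail).  Both iterators visit the set bits of `mask`; only the
 * upper loop bound changes.
 */
#define for_each_cpu_mask(cpu, mask)			\
	for ((cpu) = first_cpu(mask);			\
	     (cpu) < NR_CPUS;	/* compile-time maximum */	\
	     (cpu) = next_cpu((cpu), (mask)))

#define for_each_cpu_mask_nr(cpu, mask)			\
	for ((cpu) = first_cpu(mask);			\
	     (cpu) < nr_cpu_ids; /* possible CPUs, fixed at boot */	\
	     (cpu) = next_cpu_nr((cpu), (mask)))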
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index b0c8208df9fa..dd097b835839 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -202,7 +202,7 @@ static void drv_write(struct drv_cmd *cmd)
 	cpumask_t saved_mask = current->cpus_allowed;
 	unsigned int i;
 
-	for_each_cpu_mask(i, cmd->mask) {
+	for_each_cpu_mask_nr(i, cmd->mask) {
 		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 		do_drv_write(cmd);
 	}
@@ -451,7 +451,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
-	for_each_cpu_mask(i, cmd.mask) {
+	for_each_cpu_mask_nr(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -466,7 +466,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		}
 	}
 
-	for_each_cpu_mask(i, cmd.mask) {
+	for_each_cpu_mask_nr(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 199e4e05e5dc..f1685fb91fbd 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 		return 0;
 
 	/* notifiers */
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 	/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
 	 * Developer's Manual, Volume 3
 	 */
-	for_each_cpu_mask(i, policy->cpus)
+	for_each_cpu_mask_nr(i, policy->cpus)
 		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
 	/* notifiers */
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 206791eb46e3..c45ca6d4dce1 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -966,7 +966,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
 	freqs.old = find_khz_freq_from_fid(data->currfid);
 	freqs.new = find_khz_freq_from_fid(fid);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -974,7 +974,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
 	res = transition_fid_vid(data, fid, vid);
 	freqs.new = find_khz_freq_from_fid(data->currfid);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -997,7 +997,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 	freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
 	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -1005,7 +1005,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 	res = transition_pstate(data, pstate);
 	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 908dd347c67e..8b0dd6f2a1ac 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -476,7 +476,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 	saved_mask = current->cpus_allowed;
 	first_cpu = 1;
 	cpus_clear(covered_cpus);
-	for_each_cpu_mask(j, online_policy_cpus) {
+	for_each_cpu_mask_nr(j, online_policy_cpus) {
 		/*
 		 * Support for SMP systems.
 		 * Make sure we are running on CPU that wants to change freq
@@ -517,7 +517,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
 			target_freq, freqs.old, freqs.new, msr);
 
-		for_each_cpu_mask(k, online_policy_cpus) {
+		for_each_cpu_mask_nr(k, online_policy_cpus) {
 			freqs.cpu = k;
 			cpufreq_notify_transition(&freqs,
 					CPUFREQ_PRECHANGE);
@@ -540,7 +540,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		preempt_enable();
 	}
 
-	for_each_cpu_mask(k, online_policy_cpus) {
+	for_each_cpu_mask_nr(k, online_policy_cpus) {
 		freqs.cpu = k;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -554,7 +554,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 	 */
 
 	if (!cpus_empty(covered_cpus)) {
-		for_each_cpu_mask(j, covered_cpus) {
+		for_each_cpu_mask_nr(j, covered_cpus) {
 			set_cpus_allowed_ptr(current,
 					     &cpumask_of_cpu(j));
 			wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
@@ -564,7 +564,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		tmp = freqs.new;
 		freqs.new = freqs.old;
 		freqs.old = tmp;
-		for_each_cpu_mask(j, online_policy_cpus) {
+		for_each_cpu_mask_nr(j, online_policy_cpus) {
 			freqs.cpu = j;
 			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 1b50244b1fdf..191f7263c61d 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -279,7 +279,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 
 	cpus_allowed = current->cpus_allowed;
 
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -292,7 +292,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 	/* allow to be run on all CPUs */
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 2c8afafa18e8..a7b0f8f1736b 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -489,7 +489,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	int sibling;
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
+	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
 		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
 	}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 7c9a813e1193..88736cadbaa6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	if (err)
 		goto out_free;
 
-	for_each_cpu_mask(i, b->cpus) {
+	for_each_cpu_mask_nr(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
 	/* remove all sibling symlinks before unregistering */
-	for_each_cpu_mask(i, b->cpus) {
+	for_each_cpu_mask_nr(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 6510cde36b35..bf27114773d5 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -731,7 +731,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
 		return 0;
 	}
 
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		cpumask_t domain, new_mask;
 		int new_cpu;
 		int vector, offset;
@@ -752,7 +752,7 @@ next:
 			continue;
 		if (vector == IA32_SYSCALL_VECTOR)
 			goto next;
-		for_each_cpu_mask(new_cpu, new_mask)
+		for_each_cpu_mask_nr(new_cpu, new_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
@@ -762,7 +762,7 @@ next:
 			cfg->move_in_progress = 1;
 			cfg->old_domain = cfg->domain;
 		}
-		for_each_cpu_mask(new_cpu, new_mask)
+		for_each_cpu_mask_nr(new_cpu, new_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
 		cfg->domain = domain;
@@ -794,7 +794,7 @@ static void __clear_irq_vector(int irq)
 
 	vector = cfg->vector;
 	cpus_and(mask, cfg->domain, cpu_online_map);
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu_mask_nr(cpu, mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 687376ab07e8..09b98cd6332c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -438,7 +438,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	cpu_set(cpu, cpu_sibling_setup_map);
 
 	if (smp_num_siblings > 1) {
-		for_each_cpu_mask(i, cpu_sibling_setup_map) {
+		for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
 			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
 			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
 				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
@@ -461,7 +461,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		return;
 	}
 
-	for_each_cpu_mask(i, cpu_sibling_setup_map) {
+	for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
 			cpu_set(i, c->llc_shared_map);
@@ -1230,7 +1230,7 @@ static void remove_siblinginfo(int cpu)
 	int sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
 		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
 		/*/
 		 * last thread sibling in this cpu core going down
@@ -1239,7 +1239,7 @@ static void remove_siblinginfo(int cpu)
 			cpu_data(sibling).booted_cores--;
 	}
 
-	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
 		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
 	cpus_clear(per_cpu(cpu_sibling_map, cpu));
 	cpus_clear(per_cpu(cpu_core_map, cpu));
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 233156f39b7f..463adecc5cba 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -351,7 +351,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 
 	cpus_and(mask, mask, cpu_online_map);
 
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu_mask_nr(cpu, mask)
 		xen_send_IPI_one(cpu, vector);
 }
 
@@ -362,7 +362,7 @@ void xen_smp_send_call_function_ipi(cpumask_t mask)
 	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
 	/* Make sure other vcpus get a chance to run if they need to. */
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		if (xen_vcpu_stolen(cpu)) {
 			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 			break;