-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c         6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c          6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c          8
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c  10
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c        4
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c              2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c            4
-rw-r--r--  arch/x86/kernel/io_apic_64.c                       8
-rw-r--r--  arch/x86/kernel/smpboot.c                          8
-rw-r--r--  arch/x86/xen/smp.c                                 4
-rw-r--r--  include/asm-x86/ipi.h                              2
11 files changed, 31 insertions, 31 deletions
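
The change in every hunk below is purely mechanical: each iteration that used for_each_cpu_mask() now uses for_each_cpu_mask_nr(). The _nr variant stops walking the mask at nr_cpu_ids, the highest CPU id the booted system can actually use, instead of scanning up to the compile-time NR_CPUS limit; this saves work on kernels built with a large NR_CPUS but running on far fewer CPUs. As a rough sketch of the difference (a simplification, not the exact include/linux/cpumask.h definitions of this era):

	/* Old iterator: scans mask bits up to the compile-time maximum. */
	#define for_each_cpu_mask(cpu, mask)			\
		for ((cpu) = first_cpu(mask);			\
		     (cpu) < NR_CPUS;				\
		     (cpu) = next_cpu((cpu), (mask)))

	/* New iterator: stops at nr_cpu_ids, the runtime CPU count. */
	#define for_each_cpu_mask_nr(cpu, mask)			\
		for ((cpu) = first_cpu(mask);			\
		     (cpu) < nr_cpu_ids;			\
		     (cpu) = next_cpu_nr((cpu), (mask)))

Both forms visit exactly the set bits of the mask; only the upper bound of the scan differs, so the conversion does not change behavior.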
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index b0c8208df9fa..dd097b835839 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -202,7 +202,7 @@ static void drv_write(struct drv_cmd *cmd)
 	cpumask_t saved_mask = current->cpus_allowed;
 	unsigned int i;
 
-	for_each_cpu_mask(i, cmd->mask) {
+	for_each_cpu_mask_nr(i, cmd->mask) {
 		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 		do_drv_write(cmd);
 	}
@@ -451,7 +451,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
-	for_each_cpu_mask(i, cmd.mask) {
+	for_each_cpu_mask_nr(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -466,7 +466,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		}
 	}
 
-	for_each_cpu_mask(i, cmd.mask) {
+	for_each_cpu_mask_nr(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 199e4e05e5dc..f1685fb91fbd 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 		return 0;
 
 	/* notifiers */
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 	/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
 	 * Developer's Manual, Volume 3
 	 */
-	for_each_cpu_mask(i, policy->cpus)
+	for_each_cpu_mask_nr(i, policy->cpus)
 		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
 	/* notifiers */
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 46d4034d9f37..06d6eea5e07a 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -966,7 +966,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
 	freqs.old = find_khz_freq_from_fid(data->currfid);
 	freqs.new = find_khz_freq_from_fid(fid);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -974,7 +974,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
 	res = transition_fid_vid(data, fid, vid);
 	freqs.new = find_khz_freq_from_fid(data->currfid);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -997,7 +997,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 	freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
 	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -1005,7 +1005,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 	res = transition_pstate(data, pstate);
 	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-	for_each_cpu_mask(i, *(data->available_cores)) {
+	for_each_cpu_mask_nr(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 908dd347c67e..8b0dd6f2a1ac 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -476,7 +476,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 	saved_mask = current->cpus_allowed;
 	first_cpu = 1;
 	cpus_clear(covered_cpus);
-	for_each_cpu_mask(j, online_policy_cpus) {
+	for_each_cpu_mask_nr(j, online_policy_cpus) {
 		/*
 		 * Support for SMP systems.
 		 * Make sure we are running on CPU that wants to change freq
@@ -517,7 +517,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
 			target_freq, freqs.old, freqs.new, msr);
 
-		for_each_cpu_mask(k, online_policy_cpus) {
+		for_each_cpu_mask_nr(k, online_policy_cpus) {
 			freqs.cpu = k;
 			cpufreq_notify_transition(&freqs,
 					CPUFREQ_PRECHANGE);
@@ -540,7 +540,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		preempt_enable();
 	}
 
-	for_each_cpu_mask(k, online_policy_cpus) {
+	for_each_cpu_mask_nr(k, online_policy_cpus) {
 		freqs.cpu = k;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -554,7 +554,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 	 */
 
 	if (!cpus_empty(covered_cpus)) {
-		for_each_cpu_mask(j, covered_cpus) {
+		for_each_cpu_mask_nr(j, covered_cpus) {
 			set_cpus_allowed_ptr(current,
 					     &cpumask_of_cpu(j));
 			wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
@@ -564,7 +564,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		tmp = freqs.new;
 		freqs.new = freqs.old;
 		freqs.old = tmp;
-		for_each_cpu_mask(j, online_policy_cpus) {
+		for_each_cpu_mask_nr(j, online_policy_cpus) {
 			freqs.cpu = j;
 			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 1b50244b1fdf..191f7263c61d 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -279,7 +279,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 
 	cpus_allowed = current->cpus_allowed;
 
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -292,7 +292,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 	/* allow to be run on all CPUs */
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 
-	for_each_cpu_mask(i, policy->cpus) {
+	for_each_cpu_mask_nr(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 26d615dcb149..bfade3301c3a 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -488,7 +488,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	int sibling;
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
+	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
 		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
 	}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 7c9a813e1193..88736cadbaa6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	if (err)
 		goto out_free;
 
-	for_each_cpu_mask(i, b->cpus) {
+	for_each_cpu_mask_nr(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
 	/* remove all sibling symlinks before unregistering */
-	for_each_cpu_mask(i, b->cpus) {
+	for_each_cpu_mask_nr(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index ef1a8dfcc529..e2838cbd2ff8 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -718,7 +718,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
 		return 0;
 	}
 
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		cpumask_t domain, new_mask;
 		int new_cpu;
 		int vector, offset;
@@ -739,7 +739,7 @@ next:
 			continue;
 		if (vector == IA32_SYSCALL_VECTOR)
 			goto next;
-		for_each_cpu_mask(new_cpu, new_mask)
+		for_each_cpu_mask_nr(new_cpu, new_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
@@ -749,7 +749,7 @@ next:
 			cfg->move_in_progress = 1;
 			cfg->old_domain = cfg->domain;
 		}
-		for_each_cpu_mask(new_cpu, new_mask)
+		for_each_cpu_mask_nr(new_cpu, new_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
 		cfg->domain = domain;
@@ -781,7 +781,7 @@ static void __clear_irq_vector(int irq)
 
 	vector = cfg->vector;
 	cpus_and(mask, cfg->domain, cpu_online_map);
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu_mask_nr(cpu, mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 38988491c622..fff8ebaa554f 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -487,7 +487,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	cpu_set(cpu, cpu_sibling_setup_map);
 
 	if (smp_num_siblings > 1) {
-		for_each_cpu_mask(i, cpu_sibling_setup_map) {
+		for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
 			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
 			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
 				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
@@ -510,7 +510,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		return;
 	}
 
-	for_each_cpu_mask(i, cpu_sibling_setup_map) {
+	for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
 			cpu_set(i, c->llc_shared_map);
@@ -1298,7 +1298,7 @@ static void remove_siblinginfo(int cpu)
 	int sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
 		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
 		/*/
 		 * last thread sibling in this cpu core going down
@@ -1307,7 +1307,7 @@ static void remove_siblinginfo(int cpu)
 			cpu_data(sibling).booted_cores--;
 	}
 
-	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
 		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
 	cpus_clear(per_cpu(cpu_sibling_map, cpu));
 	cpus_clear(per_cpu(cpu_core_map, cpu));
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 94e69000f982..7a70638797ed 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -345,7 +345,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 
 	cpus_and(mask, mask, cpu_online_map);
 
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu_mask_nr(cpu, mask)
 		xen_send_IPI_one(cpu, vector);
 }
 
@@ -413,7 +413,7 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
413 413
414 /* Make sure other vcpus get a chance to run if they need to. */ 414 /* Make sure other vcpus get a chance to run if they need to. */
415 yield = false; 415 yield = false;
416 for_each_cpu_mask(cpu, mask) 416 for_each_cpu_mask_nr(cpu, mask)
417 if (xen_vcpu_stolen(cpu)) 417 if (xen_vcpu_stolen(cpu))
418 yield = true; 418 yield = true;
419 419
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index ecc80f341f37..5f7310aa3efd 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -121,7 +121,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 	 * - mbligh
 	 */
 	local_irq_save(flags);
-	for_each_cpu_mask(query_cpu, mask) {
+	for_each_cpu_mask_nr(query_cpu, mask) {
 		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
 				      vector, APIC_DEST_PHYSICAL);
 	}