author     Rusty Russell <rusty@rustcorp.com.au>    2015-03-04 19:19:16 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>    2015-03-04 23:55:04 -0500
commit     5d2068da8d339e4dff8f9b9a1246e6a79e2949d8 (patch)
tree       6365a79aa004b05476de252247bc8a27916e768f /arch/ia64/kernel/smpboot.c
parent     f9b531fe14a539ec2ad802b73c9638f324e4a4ff (diff)
ia64: fix up obsolete cpu function usage.
Thanks to spatch, then a sweep for for_each_cpu_mask => for_each_cpu.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: linux-ia64@vger.kernel.org
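
The conversion is mechanical: the obsolete helpers took a cpumask_t lvalue directly, while the cpumask_* replacements take a struct cpumask pointer. A minimal userspace sketch of the shape of the change, with a toy 64-bit mask standing in for the kernel's struct cpumask (the toy type, the 64-CPU limit, and the GCC/Clang popcount builtin are assumptions for illustration, not the kernel implementation):

/*
 * Toy model of the rename this patch applies:
 *
 *   cpu_set(cpu, mask)      -> cpumask_set_cpu(cpu, &mask)
 *   cpu_clear(cpu, mask)    -> cpumask_clear_cpu(cpu, &mask)
 *   cpu_isset(cpu, mask)    -> cpumask_test_cpu(cpu, &mask)
 *   cpus_weight(mask)       -> cpumask_weight(&mask)
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cpumask { uint64_t bits; };	/* toy: at most 64 CPUs */

static void cpumask_set_cpu(unsigned int cpu, struct cpumask *m)
{
	m->bits |= 1ULL << cpu;
}

static void cpumask_clear_cpu(unsigned int cpu, struct cpumask *m)
{
	m->bits &= ~(1ULL << cpu);
}

static bool cpumask_test_cpu(unsigned int cpu, const struct cpumask *m)
{
	return (m->bits >> cpu) & 1;
}

static unsigned int cpumask_weight(const struct cpumask *m)
{
	return (unsigned int)__builtin_popcountll(m->bits);
}

int main(void)
{
	struct cpumask callin = { 0 };

	cpumask_set_cpu(0, &callin);	/* was: cpu_set(0, cpu_callin_map) */
	cpumask_set_cpu(3, &callin);
	printf("cpu 3 called in: %d, weight: %u\n",
	       cpumask_test_cpu(3, &callin), cpumask_weight(&callin));
	cpumask_clear_cpu(3, &callin);	/* was: cpu_clear(3, ...) */
	printf("weight after clear: %u\n", cpumask_weight(&callin));
	return 0;
}
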
Diffstat (limited to 'arch/ia64/kernel/smpboot.c')
-rw-r--r--  arch/ia64/kernel/smpboot.c  42
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 547a48d78bd7..15051e9c2c6f 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -434,7 +434,7 @@ smp_callin (void)
 	/*
 	 * Allow the master to continue.
 	 */
-	cpu_set(cpuid, cpu_callin_map);
+	cpumask_set_cpu(cpuid, &cpu_callin_map);
 	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
 }
 
@@ -475,13 +475,13 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
 	 */
 	Dprintk("Waiting on callin_map ...");
 	for (timeout = 0; timeout < 100000; timeout++) {
-		if (cpu_isset(cpu, cpu_callin_map))
+		if (cpumask_test_cpu(cpu, &cpu_callin_map))
 			break;  /* It has booted */
 		udelay(100);
 	}
 	Dprintk("\n");
 
-	if (!cpu_isset(cpu, cpu_callin_map)) {
+	if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
 		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
 		ia64_cpu_to_sapicid[cpu] = -1;
 		set_cpu_online(cpu, false);  /* was set in smp_callin() */
@@ -541,7 +541,7 @@ smp_prepare_cpus (unsigned int max_cpus)
 
 	smp_setup_percpu_timer();
 
-	cpu_set(0, cpu_callin_map);
+	cpumask_set_cpu(0, &cpu_callin_map);
 
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
@@ -565,7 +565,7 @@ smp_prepare_cpus (unsigned int max_cpus)
 void smp_prepare_boot_cpu(void)
 {
 	set_cpu_online(smp_processor_id(), true);
-	cpu_set(smp_processor_id(), cpu_callin_map);
+	cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
 	set_numa_node(cpu_to_node_map[smp_processor_id()]);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	paravirt_post_smp_prepare_boot_cpu();
@@ -577,10 +577,10 @@ clear_cpu_sibling_map(int cpu)
 {
 	int i;
 
-	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
-	for_each_cpu_mask(i, cpu_core_map[cpu])
-		cpu_clear(cpu, cpu_core_map[i]);
+	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
+		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
+	for_each_cpu(i, &cpu_core_map[cpu])
+		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
 
 	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
 }
@@ -592,12 +592,12 @@ remove_siblinginfo(int cpu)
 
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
-		cpu_clear(cpu, cpu_core_map[cpu]);
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
+		cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
+		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
 		return;
 	}
 
-	last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
+	last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
 
 	/* remove it from all sibling map's */
 	clear_cpu_sibling_map(cpu);
@@ -673,7 +673,7 @@ int __cpu_disable(void)
 	remove_siblinginfo(cpu);
 	fixup_irqs();
 	local_flush_tlb_all();
-	cpu_clear(cpu, cpu_callin_map);
+	cpumask_clear_cpu(cpu, &cpu_callin_map);
 	return 0;
 }
 
@@ -718,11 +718,13 @@ static inline void set_cpu_sibling_map(int cpu)
 
 	for_each_online_cpu(i) {
 		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
-			cpu_set(i, cpu_core_map[cpu]);
-			cpu_set(cpu, cpu_core_map[i]);
+			cpumask_set_cpu(i, &cpu_core_map[cpu]);
+			cpumask_set_cpu(cpu, &cpu_core_map[i]);
 			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
-				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+				cpumask_set_cpu(i,
+						&per_cpu(cpu_sibling_map, cpu));
+				cpumask_set_cpu(cpu,
+						&per_cpu(cpu_sibling_map, i));
 			}
 		}
 	}
@@ -742,7 +744,7 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	 * Already booted cpu? not valid anymore since we dont
 	 * do idle loop tightspin anymore.
 	 */
-	if (cpu_isset(cpu, cpu_callin_map))
+	if (cpumask_test_cpu(cpu, &cpu_callin_map))
 		return -EINVAL;
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
@@ -753,8 +755,8 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-		cpu_set(cpu, cpu_core_map[cpu]);
+		cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
+		cpumask_set_cpu(cpu, &cpu_core_map[cpu]);
 		return 0;
 	}
 
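
The same pointer convention applies to the iterators swept up at the end of the patch: for_each_cpu_mask(i, mask) becomes for_each_cpu(i, &mask). A companion sketch under the same toy-mask assumption as above (the linear scan is an assumption for illustration; the kernel's loop is built on find_next_bit() instead):

/*
 * Toy model of the loop rename:
 *   for_each_cpu_mask(i, mask) -> for_each_cpu(i, &mask)
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_NR_CPUS 64

struct cpumask { uint64_t bits; };

#define for_each_cpu(cpu, maskp)				\
	for ((cpu) = 0; (cpu) < TOY_NR_CPUS; (cpu)++)		\
		if (((maskp)->bits >> (cpu)) & 1)

int main(void)
{
	/* e.g. CPUs 2 and 5 are siblings of the CPU being unplugged */
	struct cpumask siblings = { (1ULL << 2) | (1ULL << 5) };
	unsigned int i;

	for_each_cpu(i, &siblings)
		printf("clearing sibling map of cpu %u\n", i);
	return 0;
}

Passing a pointer rather than the mask itself keeps struct cpumask opaque to callers, which is what lets configurations with large NR_CPUS (CONFIG_CPUMASK_OFFSTACK) allocate cpumasks away from the stack.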