Diffstat (limited to 'arch/sparc/kernel/smp_64.c')
-rw-r--r-- | arch/sparc/kernel/smp_64.c | 58 +++++++++++++++++++++++++++++-----------------------------
1 file changed, 29 insertions(+), 29 deletions(-)
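The change below converts arch/sparc/kernel/smp_64.c from the old cpumask API (cpu_set(), cpu_clear(), cpu_isset(), cpus_clear(), for_each_cpu_mask(), &cpu_online_map, &cpumask_of_cpu(cpu)) to the current pointer-based API (cpumask_set_cpu(), cpumask_clear_cpu(), cpumask_test_cpu(), cpumask_clear(), for_each_cpu(), cpu_online_mask, cpumask_of(cpu)), using the set_cpu_online()/set_cpu_present() accessors for the global masks. A reference sketch of the mapping follows the diff.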
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 9478da7fdb3e..99cb17251bb5 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -121,11 +121,11 @@ void __cpuinit smp_callin(void)
 	/* inform the notifiers about the new cpu */
 	notify_cpu_starting(cpuid);
 
-	while (!cpu_isset(cpuid, smp_commenced_mask))
+	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
 		rmb();
 
 	ipi_call_lock_irq();
-	cpu_set(cpuid, cpu_online_map);
+	set_cpu_online(cpuid, true);
 	ipi_call_unlock_irq();
 
 	/* idle thread is expected to have preempt disabled */
@@ -785,7 +785,7 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask
 
 /* Send cross call to all processors mentioned in MASK_P
  * except self. Really, there are only two cases currently,
- * "&cpu_online_map" and "&mm->cpu_vm_mask".
+ * "cpu_online_mask" and "mm_cpumask(mm)".
  */
 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 {
@@ -797,7 +797,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 /* Send cross call to all processors except self. */
 static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
 {
-	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
+	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
 }
 
 extern unsigned long xcall_sync_tick;
@@ -805,7 +805,7 @@ extern unsigned long xcall_sync_tick;
 static void smp_start_sync_tick_client(int cpu)
 {
 	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
-		      &cpumask_of_cpu(cpu));
+		      cpumask_of(cpu));
 }
 
 extern unsigned long xcall_call_function;
@@ -820,7 +820,7 @@ extern unsigned long xcall_call_function_single;
 void arch_send_call_function_single_ipi(int cpu)
 {
 	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
-		      &cpumask_of_cpu(cpu));
+		      cpumask_of(cpu));
 }
 
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
@@ -918,7 +918,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, &cpumask_of_cpu(cpu));
+			      (u64) pg_addr, cpumask_of(cpu));
 #ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -954,7 +954,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, &cpu_online_map);
+			      (u64) pg_addr, cpu_online_mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -1197,32 +1197,32 @@ void __devinit smp_fill_in_sib_core_maps(void)
 	for_each_present_cpu(i) {
 		unsigned int j;
 
-		cpus_clear(cpu_core_map[i]);
+		cpumask_clear(&cpu_core_map[i]);
 		if (cpu_data(i).core_id == 0) {
-			cpu_set(i, cpu_core_map[i]);
+			cpumask_set_cpu(i, &cpu_core_map[i]);
 			continue;
 		}
 
 		for_each_present_cpu(j) {
 			if (cpu_data(i).core_id ==
 			    cpu_data(j).core_id)
-				cpu_set(j, cpu_core_map[i]);
+				cpumask_set_cpu(j, &cpu_core_map[i]);
 		}
 	}
 
 	for_each_present_cpu(i) {
 		unsigned int j;
 
-		cpus_clear(per_cpu(cpu_sibling_map, i));
+		cpumask_clear(&per_cpu(cpu_sibling_map, i));
 		if (cpu_data(i).proc_id == -1) {
-			cpu_set(i, per_cpu(cpu_sibling_map, i));
+			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
 			continue;
 		}
 
 		for_each_present_cpu(j) {
 			if (cpu_data(i).proc_id ==
 			    cpu_data(j).proc_id)
-				cpu_set(j, per_cpu(cpu_sibling_map, i));
+				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
 		}
 	}
 }
@@ -1232,10 +1232,10 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	int ret = smp_boot_one_cpu(cpu);
 
 	if (!ret) {
-		cpu_set(cpu, smp_commenced_mask);
-		while (!cpu_isset(cpu, cpu_online_map))
+		cpumask_set_cpu(cpu, &smp_commenced_mask);
+		while (!cpu_online(cpu))
 			mb();
-		if (!cpu_isset(cpu, cpu_online_map)) {
+		if (!cpu_online(cpu)) {
 			ret = -ENODEV;
 		} else {
 			/* On SUN4V, writes to %tick and %stick are
@@ -1269,7 +1269,7 @@ void cpu_play_dead(void)
 				tb->nonresum_mondo_pa, 0);
 	}
 
-	cpu_clear(cpu, smp_commenced_mask);
+	cpumask_clear_cpu(cpu, &smp_commenced_mask);
 	membar_safe("#Sync");
 
 	local_irq_disable();
@@ -1290,13 +1290,13 @@ int __cpu_disable(void)
 	cpuinfo_sparc *c;
 	int i;
 
-	for_each_cpu_mask(i, cpu_core_map[cpu])
-		cpu_clear(cpu, cpu_core_map[i]);
-	cpus_clear(cpu_core_map[cpu]);
+	for_each_cpu(i, &cpu_core_map[cpu])
+		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
+	cpumask_clear(&cpu_core_map[cpu]);
 
-	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
+	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
+		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
+	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
 
 	c = &cpu_data(cpu);
 
@@ -1313,7 +1313,7 @@ int __cpu_disable(void)
 	local_irq_disable();
 
 	ipi_call_lock();
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	ipi_call_unlock();
 
 	cpu_map_rebuild();
@@ -1327,11 +1327,11 @@ void __cpu_die(unsigned int cpu)
 
 	for (i = 0; i < 100; i++) {
 		smp_rmb();
-		if (!cpu_isset(cpu, smp_commenced_mask))
+		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
 			break;
 		msleep(100);
 	}
-	if (cpu_isset(cpu, smp_commenced_mask)) {
+	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
 		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 	} else {
 #if defined(CONFIG_SUN_LDOMS)
@@ -1341,7 +1341,7 @@ void __cpu_die(unsigned int cpu)
 		do {
 			hv_err = sun4v_cpu_stop(cpu);
 			if (hv_err == HV_EOK) {
-				cpu_clear(cpu, cpu_present_map);
+				set_cpu_present(cpu, false);
 				break;
 			}
 		} while (--limit > 0);
@@ -1362,7 +1362,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 void smp_send_reschedule(int cpu)
 {
 	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
-		      &cpumask_of_cpu(cpu));
+		      cpumask_of(cpu));
 }
 
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
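For reference, every hunk above applies the same old-to-new cpumask mapping. The sketch below summarizes that pattern in one place; it is illustrative only and not part of the patch: the function cpumask_api_example() and its parameters are hypothetical, while the cpumask calls themselves are the real kernel API exercised in the diff.

#include <linux/cpumask.h>
#include <linux/printk.h>

/* cpumask_api_example() is a hypothetical illustration; only the
 * cpumask operations are real API. Each call is annotated with the
 * old-style equivalent it replaces in this patch.
 */
static void cpumask_api_example(unsigned int cpu, struct cpumask *mask)
{
	int i;

	cpumask_set_cpu(cpu, mask);		/* was: cpu_set(cpu, *mask) */
	cpumask_clear_cpu(cpu, mask);		/* was: cpu_clear(cpu, *mask) */

	if (cpumask_test_cpu(cpu, mask))	/* was: cpu_isset(cpu, *mask) */
		pr_info("cpu %u is set\n", cpu);

	for_each_cpu(i, mask)			/* was: for_each_cpu_mask(i, *mask) */
		pr_info("cpu %d in mask\n", i);

	cpumask_clear(mask);			/* was: cpus_clear(*mask) */
}

The global masks follow the same shape: cpu_online_mask and cpumask_of(cpu) are const struct cpumask pointers replacing &cpu_online_map and &cpumask_of_cpu(cpu); reads use helpers such as cpu_online(cpu), and writes to the global online/present masks go through set_cpu_online(cpu, true/false) and set_cpu_present(cpu, false) rather than cpu_set()/cpu_clear() on the map variables.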