Diffstat (limited to 'arch/sparc/kernel/smp_64.c')
 arch/sparc/kernel/smp_64.c | 72 ++++++++++++++++++++++++++++++++++++------------------------------------
 1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b6a2b8f47040..99cb17251bb5 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -49,6 +49,7 @@
 #include <asm/mdesc.h>
 #include <asm/ldc.h>
 #include <asm/hypervisor.h>
+#include <asm/pcr.h>
 
 #include "cpumap.h"
 
@@ -120,11 +121,11 @@ void __cpuinit smp_callin(void)
 	/* inform the notifiers about the new cpu */
 	notify_cpu_starting(cpuid);
 
-	while (!cpu_isset(cpuid, smp_commenced_mask))
+	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
 		rmb();
 
 	ipi_call_lock_irq();
-	cpu_set(cpuid, cpu_online_map);
+	set_cpu_online(cpuid, true);
 	ipi_call_unlock_irq();
 
 	/* idle thread is expected to have preempt disabled */
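
This hunk shows the pattern the rest of the patch repeats: the old cpu_isset()/cpu_set() macros took a cpumask_t lvalue, while the current cpumask API works through struct cpumask pointers, and the global online map is written only via its accessor. A minimal before/after sketch (the example_ helper is illustrative, not part of the patch):

	#include <linux/cpumask.h>

	static void example_mark_self_online(int cpu, struct cpumask *commenced)
	{
		/* old: while (!cpu_isset(cpu, *commenced)) rmb(); */
		while (!cpumask_test_cpu(cpu, commenced))
			cpu_relax();

		/* old: cpu_set(cpu, cpu_online_map); */
		set_cpu_online(cpu, true);
	}

Keeping callers pointer-based is what allows CONFIG_CPUMASK_OFFSTACK to move large masks off the stack on high-NR_CPUS configurations without further caller changes.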
@@ -188,7 +189,7 @@ static inline long get_delta (long *rt, long *master)
 void smp_synchronize_tick_client(void)
 {
 	long i, delta, adj, adjust_latency = 0, done = 0;
-	unsigned long flags, rt, master_time_stamp, bound;
+	unsigned long flags, rt, master_time_stamp;
 #if DEBUG_TICK_SYNC
 	struct {
 		long rt;	/* roundtrip time */
@@ -207,10 +208,8 @@ void smp_synchronize_tick_client(void)
 	{
 		for (i = 0; i < NUM_ROUNDS; i++) {
 			delta = get_delta(&rt, &master_time_stamp);
-			if (delta == 0) {
+			if (delta == 0)
 				done = 1;	/* let's lock on to this... */
-				bound = rt;
-			}
 
 			if (!done) {
 				if (i > 0) {
@@ -786,7 +785,7 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask
 
 /* Send cross call to all processors mentioned in MASK_P
  * except self.  Really, there are only two cases currently,
- * "&cpu_online_map" and "&mm->cpu_vm_mask".
+ * "cpu_online_mask" and "mm_cpumask(mm)".
  */
 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 {
@@ -798,7 +797,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 /* Send cross call to all processors except self. */
 static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
 {
-	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
+	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
 }
 
 extern unsigned long xcall_sync_tick;
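
Note the dropped &: cpu_online_map was an exported cpumask_t object, whereas cpu_online_mask is already a const struct cpumask * that can be handed to any mask-taking function directly. Reads go through helpers (cpu_online(), num_online_cpus()), and writes only through set_cpu_online(), as in the smp_callin() hunk above. A small sketch with an illustrative example_ name:

	#include <linux/cpumask.h>

	static unsigned int example_online_count(void)
	{
		/* old: cpus_weight(cpu_online_map); idiomatically num_online_cpus() */
		return cpumask_weight(cpu_online_mask);
	}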
@@ -806,7 +805,7 @@ extern unsigned long xcall_sync_tick;
 static void smp_start_sync_tick_client(int cpu)
 {
 	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
-		      &cpumask_of_cpu(cpu));
+		      cpumask_of(cpu));
 }
 
 extern unsigned long xcall_call_function;
@@ -821,7 +820,7 @@ extern unsigned long xcall_call_function_single;
 void arch_send_call_function_single_ipi(int cpu)
 {
 	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
-		      &cpumask_of_cpu(cpu));
+		      cpumask_of(cpu));
 }
 
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
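
The &cpumask_of_cpu(cpu) idiom materialized a whole cpumask_t and took the address of the temporary; with NR_CPUS=4096 that is a 512-byte on-stack object. cpumask_of(cpu) instead returns a const struct cpumask * aimed at a shared constant bitmap, so single-CPU cross calls copy nothing. A hedged sketch (the example_ name is mine):

	#include <linux/cpumask.h>

	static void example_target_one_cpu(int cpu)
	{
		const struct cpumask *target = cpumask_of(cpu);	/* no stack copy */

		/* the patch passes such a pointer straight to xcall_deliver() */
		WARN_ON(cpumask_first(target) != cpu || cpumask_weight(target) != 1);
	}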
@@ -919,7 +918,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, &cpumask_of_cpu(cpu));
+			      (u64) pg_addr, cpumask_of(cpu));
 #ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -932,13 +931,12 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 {
 	void *pg_addr;
-	int this_cpu;
 	u64 data0;
 
 	if (tlb_type == hypervisor)
 		return;
 
-	this_cpu = get_cpu();
+	preempt_disable();
 
 #ifdef CONFIG_DEBUG_DCFLUSH
 	atomic_inc(&dcpage_flushes);
@@ -956,14 +954,14 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, &cpu_online_map);
+			      (u64) pg_addr, cpu_online_mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
 #endif
 	}
 	__local_flush_dcache_page(page);
 
-	put_cpu();
+	preempt_enable();
 }
 
 void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
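
get_cpu() expands to preempt_disable() followed by smp_processor_id(), and put_cpu() to preempt_enable(). Because flush_dcache_page_all() never read the returned CPU number (this_cpu was write-only, per the hunk above), the bare preempt_disable()/preempt_enable() pair preserves the real requirement, that the task stay on one CPU between the cross call and __local_flush_dcache_page(), without the dead variable. Sketch of the equivalence:

	#include <linux/smp.h>

	static void example_pinned_flush(void)
	{
		preempt_disable();	/* old: (void) get_cpu(); */
		/* ... xcall_deliver() + __local_flush_dcache_page() ... */
		preempt_enable();	/* old: put_cpu(); */
	}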
@@ -1199,32 +1197,32 @@ void __devinit smp_fill_in_sib_core_maps(void)
 	for_each_present_cpu(i) {
 		unsigned int j;
 
-		cpus_clear(cpu_core_map[i]);
+		cpumask_clear(&cpu_core_map[i]);
 		if (cpu_data(i).core_id == 0) {
-			cpu_set(i, cpu_core_map[i]);
+			cpumask_set_cpu(i, &cpu_core_map[i]);
 			continue;
 		}
 
 		for_each_present_cpu(j) {
 			if (cpu_data(i).core_id ==
 			    cpu_data(j).core_id)
-				cpu_set(j, cpu_core_map[i]);
+				cpumask_set_cpu(j, &cpu_core_map[i]);
 		}
 	}
 
 	for_each_present_cpu(i) {
 		unsigned int j;
 
-		cpus_clear(per_cpu(cpu_sibling_map, i));
+		cpumask_clear(&per_cpu(cpu_sibling_map, i));
 		if (cpu_data(i).proc_id == -1) {
-			cpu_set(i, per_cpu(cpu_sibling_map, i));
+			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
 			continue;
 		}
 
 		for_each_present_cpu(j) {
 			if (cpu_data(i).proc_id ==
 			    cpu_data(j).proc_id)
-				cpu_set(j, per_cpu(cpu_sibling_map, i));
+				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
 		}
 	}
 }
@@ -1234,10 +1232,10 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	int ret = smp_boot_one_cpu(cpu);
 
 	if (!ret) {
-		cpu_set(cpu, smp_commenced_mask);
-		while (!cpu_isset(cpu, cpu_online_map))
+		cpumask_set_cpu(cpu, &smp_commenced_mask);
+		while (!cpu_online(cpu))
 			mb();
-		if (!cpu_isset(cpu, cpu_online_map)) {
+		if (!cpu_online(cpu)) {
 			ret = -ENODEV;
 		} else {
 			/* On SUN4V, writes to %tick and %stick are
@@ -1271,7 +1269,7 @@ void cpu_play_dead(void)
 				tb->nonresum_mondo_pa, 0);
 	}
 
-	cpu_clear(cpu, smp_commenced_mask);
+	cpumask_clear_cpu(cpu, &smp_commenced_mask);
 	membar_safe("#Sync");
 
 	local_irq_disable();
@@ -1292,13 +1290,13 @@ int __cpu_disable(void)
 	cpuinfo_sparc *c;
 	int i;
 
-	for_each_cpu_mask(i, cpu_core_map[cpu])
-		cpu_clear(cpu, cpu_core_map[i]);
-	cpus_clear(cpu_core_map[cpu]);
+	for_each_cpu(i, &cpu_core_map[cpu])
+		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
+	cpumask_clear(&cpu_core_map[cpu]);
 
-	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
+	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
+		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
+	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
 
 	c = &cpu_data(cpu);
 
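
for_each_cpu_mask(i, mask) took the mask as an lvalue; for_each_cpu(i, maskp) takes a pointer, consistent with the rest of the converted API. The teardown idiom above, restated as a standalone sketch over this file's cpu_core_map[] array (the example_ helper is mine):

	#include <linux/cpumask.h>

	static void example_unlink_cpu(int cpu, cpumask_t *core_map)
	{
		int i;

		/* drop 'cpu' from every core mask that references it,
		 * then empty its own mask */
		for_each_cpu(i, &core_map[cpu])
			cpumask_clear_cpu(cpu, &core_map[i]);
		cpumask_clear(&core_map[cpu]);
	}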
@@ -1315,7 +1313,7 @@ int __cpu_disable(void)
 	local_irq_disable();
 
 	ipi_call_lock();
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	ipi_call_unlock();
 
 	cpu_map_rebuild();
@@ -1329,11 +1327,11 @@ void __cpu_die(unsigned int cpu)
 
 	for (i = 0; i < 100; i++) {
 		smp_rmb();
-		if (!cpu_isset(cpu, smp_commenced_mask))
+		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
 			break;
 		msleep(100);
 	}
-	if (cpu_isset(cpu, smp_commenced_mask)) {
+	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
 		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 	} else {
 #if defined(CONFIG_SUN_LDOMS)
@@ -1343,7 +1341,7 @@ void __cpu_die(unsigned int cpu)
 		do {
 			hv_err = sun4v_cpu_stop(cpu);
 			if (hv_err == HV_EOK) {
-				cpu_clear(cpu, cpu_present_map);
+				set_cpu_present(cpu, false);
 				break;
 			}
 		} while (--limit > 0);
@@ -1358,17 +1356,19 @@ void __cpu_die(unsigned int cpu)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
+	pcr_arch_init();
 }
 
 void smp_send_reschedule(int cpu)
 {
 	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
-		      &cpumask_of_cpu(cpu));
+		      cpumask_of(cpu));
 }
 
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
+	scheduler_ipi();
 }
 
 /* This is a nop because we capture all other cpus
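
Two functional changes ride along with the mechanical mask conversion. smp_cpus_done() now calls pcr_arch_init(), which is what the new <asm/pcr.h> include at the top serves; presumably the PCR (performance-control register) setup is deferred until every CPU is online. And the reschedule handler gains scheduler_ipi(): the scheduler expects the architecture's reschedule interrupt to call this hook so pending cross-CPU scheduler work (such as remote wakeups) is processed in the IPI itself rather than being left to the interrupt-return path. The resulting handler follows the generic shape:

	/* ack the arch-specific interrupt source, then run the
	 * scheduler's IPI hook (declared in <linux/sched.h>) */
	void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
	{
		clear_softint(1 << irq);
		scheduler_ipi();
	}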