author    Anton Blanchard <anton@samba.org>          2005-05-01 11:58:47 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-05-01 11:58:47 -0400
commit    0d8d4d42f2d00eb65262b49f4edd4cf7ef4eb6fc
tree      56fdbffa1f3eddc68b56a1148fb372b296fc09da
parent    eeb24de431ac8c80fd13a2c479cd0eb51b70484e
[PATCH] ppc64: use smp_mb and smp_wmb
Use smp_mb and smp_wmb. In particular smp_wmb is lighter weight than wmb.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/ppc64/kernel/smp.c   | 12 ++++++------
-rw-r--r--  arch/ppc64/kernel/time.c  | 12 ++++++------
2 files changed, 12 insertions(+), 12 deletions(-)
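The point of the switch is cost: mb()/wmb()/rmb() have to order accesses against device I/O as well as against other CPUs, so they always emit a hardware barrier, while the smp_* variants only promise ordering as seen by other CPUs. On a uniprocessor build they reduce to a compiler barrier, and on SMP the architecture is free to map them to a cheaper instruction, which is why smp_wmb ends up lighter than wmb here. A rough sketch of that split follows; these are illustrative macros, not the actual ppc64 header definitions:

/* Illustrative sketch of the mb()/smp_mb() split; the real definitions
 * live in the arch headers and differ in detail. */
#ifdef CONFIG_SMP
#define smp_mb()	mb()	/* still a hardware barrier on SMP */
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()	/* or a lighter store-only barrier where the arch has one */
#else
/* UP build: no other CPU can observe the ordering, so stopping the
 * compiler from reordering is enough -- no instruction is emitted. */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif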
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 1c92da3e452..3b906cd9403 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -125,7 +125,7 @@ void __devinit smp_generic_kick_cpu(int nr)
 	 * the processor will continue on to secondary_start
 	 */
 	paca[nr].cpu_start = 1;
-	mb();
+	smp_mb();
 }
 
 #endif /* CONFIG_PPC_MULTIPLATFORM */
@@ -256,7 +256,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	}
 
 	call_data = &data;
-	wmb();
+	smp_wmb();
 	/* Send a message to all other CPUs and wait for them to respond */
 	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);
 
@@ -431,7 +431,7 @@ int generic_cpu_enable(unsigned int cpu)
 
 	/* get the target out of it's holding state */
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-	wmb();
+	smp_wmb();
 
 	while (!cpu_online(cpu))
 		cpu_relax();
@@ -447,7 +447,7 @@ void generic_cpu_die(unsigned int cpu)
 	int i;
 
 	for (i = 0; i < 100; i++) {
-		rmb();
+		smp_rmb();
 		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
 			return;
 		msleep(100);
@@ -463,7 +463,7 @@ void generic_mach_cpu_die(void)
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
 	__get_cpu_var(cpu_state) = CPU_DEAD;
-	wmb();
+	smp_wmb();
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
 
@@ -515,7 +515,7 @@ int __devinit __cpu_up(unsigned int cpu)
 	 * be written out to main store before we release
 	 * the processor.
 	 */
-	mb();
+	smp_mb();
 
 	/* wake up cpus */
 	DBG("smp: kicking cpu %d\n", cpu);
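Every smp.c change sits on the same shape of CPU-to-CPU handshake: one CPU publishes a per-cpu state word and another CPU polls it, so only SMP ordering is required, and the write-side smp_wmb() pairs with the poller's smp_rmb(). A simplified sketch of the offline handshake from the hunks above (condensed from generic_mach_cpu_die/generic_cpu_die; the function names and error message are made up, not a literal copy):

/* Dying CPU: publish CPU_DEAD, then spin until it is told to restart. */
static void mach_cpu_die_sketch(void)
{
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();			/* publish CPU_DEAD before anyone polls it */
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

/* Controlling CPU: poll for up to ~10s (100 x msleep(100)) for CPU_DEAD. */
static void cpu_die_wait_sketch(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();		/* pairs with the smp_wmb() above */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%u did not go offline\n", cpu);
}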
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 77ded5a363b..772a465b49f 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -221,15 +221,15 @@ static __inline__ void timer_recalc_offset(unsigned long cur_tb)
 	temp_varp->tb_to_xs = do_gtod.varp->tb_to_xs;
 	temp_varp->tb_orig_stamp = new_tb_orig_stamp;
 	temp_varp->stamp_xsec = new_stamp_xsec;
-	mb();
+	smp_mb();
 	do_gtod.varp = temp_varp;
 	do_gtod.var_idx = temp_idx;
 
 	++(systemcfg->tb_update_count);
-	wmb();
+	smp_wmb();
 	systemcfg->tb_orig_stamp = new_tb_orig_stamp;
 	systemcfg->stamp_xsec = new_stamp_xsec;
-	wmb();
+	smp_wmb();
 	++(systemcfg->tb_update_count);
 }
 
@@ -648,7 +648,7 @@ void ppc_adjtimex(void)
 	temp_varp->tb_to_xs = new_tb_to_xs;
 	temp_varp->stamp_xsec = new_stamp_xsec;
 	temp_varp->tb_orig_stamp = do_gtod.varp->tb_orig_stamp;
-	mb();
+	smp_mb();
 	do_gtod.varp = temp_varp;
 	do_gtod.var_idx = temp_idx;
 
@@ -662,10 +662,10 @@ void ppc_adjtimex(void)
 	 * loops back and reads them again until this criteria is met.
 	 */
 	++(systemcfg->tb_update_count);
-	wmb();
+	smp_wmb();
 	systemcfg->tb_to_xs = new_tb_to_xs;
 	systemcfg->stamp_xsec = new_stamp_xsec;
-	wmb();
+	smp_wmb();
 	++(systemcfg->tb_update_count);
 
 	write_sequnlock_irqrestore( &xtime_lock, flags );
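The systemcfg fields are read lock-free (the comment in the last hunk refers to readers that loop until they see a stable tb_update_count), so the writer brackets the update: bump the count, smp_wmb(), store the new values, smp_wmb(), bump the count again. A reader that observes the same, even count before and after its loads knows it did not race an update. A reader-side sketch, illustrative only -- the struct, function name, and retry details are assumptions; the systemcfg field names are the ones from the diff:

/* Illustrative lock-free reader for the tb_update_count protocol. */
struct tb_snapshot {
	unsigned long tb_orig_stamp;
	unsigned long stamp_xsec;
	unsigned long tb_to_xs;
};

static void read_tb_vars_sketch(struct tb_snapshot *s)
{
	unsigned long count;

	do {
		count = systemcfg->tb_update_count;
		smp_rmb();		/* read the count before the payload */
		s->tb_orig_stamp = systemcfg->tb_orig_stamp;
		s->stamp_xsec    = systemcfg->stamp_xsec;
		s->tb_to_xs      = systemcfg->tb_to_xs;
		smp_rmb();		/* read the payload before re-checking */
		/* Retry if an update was in flight (odd count) or completed
		 * in between (count changed). */
	} while ((count & 1) || count != systemcfg->tb_update_count);
}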