author     Peter Zijlstra <peterz@infradead.org>      2014-03-17 13:06:10 -0400
committer  Ingo Molnar <mingo@kernel.org>             2014-04-18 08:20:48 -0400
commit     4e857c58efeb99393cba5a5d0d8ec7117183137c (patch)
tree       3f6fd464e4fddb2fe90374c075c9d06603cf8bbc /kernel
parent     1b15611e1c30b37abe393d411c316cd659920bf5 (diff)
arch: Mass conversion of smp_mb__*()
Mostly scripted conversion of the smp_mb__* barriers.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-55dhyhocezdw1dg7u19hmh1u@git.kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
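The renamed helpers are operation-agnostic: a single smp_mb__before_atomic()/smp_mb__after_atomic() pair replaces the old _dec/_inc/_clear_bit variants seen throughout the hunks below. As a rough userspace C11 analogue only (an illustrative sketch with hypothetical names, not kernel code), the idiom is a relaxed atomic RMW bracketed by full fences:

/*
 * Userspace C11 sketch of the renamed barrier idiom (illustrative
 * analogue only; "counter" and "enter" are hypothetical names, and
 * this is not the kernel implementation).  The atomic RMW itself is
 * unordered; the fences on either side play the role of
 * smp_mb__before_atomic() / smp_mb__after_atomic().
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int counter;              /* e.g. a waiter or nesting count */

static void enter(void)
{
        /* smp_mb__before_atomic(): order prior accesses before the RMW */
        atomic_thread_fence(memory_order_seq_cst);
        /* atomic_inc(&counter): the RMW itself carries no ordering */
        atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed);
        /* smp_mb__after_atomic(): order the RMW before later accesses */
        atomic_thread_fence(memory_order_seq_cst);
}

int main(void)
{
        enter();
        printf("counter = %d\n", atomic_load(&counter));
        return 0;
}

On architectures such as x86, whose atomic read-modify-write operations already imply a full barrier, the kernel helpers reduce to a plain compiler barrier, which is why the rename is purely a naming cleanup rather than a behavioural change.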
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/debug/debug_core.c    4
-rw-r--r--   kernel/futex.c               4
-rw-r--r--   kernel/kmod.c                2
-rw-r--r--   kernel/rcu/tree.c           22
-rw-r--r--   kernel/rcu/tree_plugin.h     8
-rw-r--r--   kernel/sched/cpupri.c        6
-rw-r--r--   kernel/sched/wait.c          2
7 files changed, 24 insertions, 24 deletions
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 2956c8da1605..1adf62b39b96 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -534,7 +534,7 @@ return_normal:
                 kgdb_info[cpu].exception_state &=
                         ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
                 kgdb_info[cpu].enter_kgdb--;
-                smp_mb__before_atomic_dec();
+                smp_mb__before_atomic();
                 atomic_dec(&slaves_in_kgdb);
                 dbg_touch_watchdogs();
                 local_irq_restore(flags);
@@ -662,7 +662,7 @@ kgdb_restore:
         kgdb_info[cpu].exception_state &=
                 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
         kgdb_info[cpu].enter_kgdb--;
-        smp_mb__before_atomic_dec();
+        smp_mb__before_atomic();
         atomic_dec(&masters_in_kgdb);
         /* Free kgdb_active */
         atomic_set(&kgdb_active, -1);
diff --git a/kernel/futex.c b/kernel/futex.c
index 5f589279e462..b991ec05b8f9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -267,7 +267,7 @@ static inline void futex_get_mm(union futex_key *key)
          * get_futex_key() implies a full barrier. This is relied upon
          * as full barrier (B), see the ordering comment above.
          */
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
 }
 
 /*
@@ -280,7 +280,7 @@ static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
         /*
          * Full barrier (A), see the ordering comment above.
          */
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
 #endif
 }
 
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 6b375af4958d..0ac67a5861c5 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -498,7 +498,7 @@ int __usermodehelper_disable(enum umh_disable_depth depth)
 static void helper_lock(void)
 {
         atomic_inc(&running_helpers);
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
 }
 
 static void helper_unlock(void)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0c47e300210a..88b4a1dcb58c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -387,9 +387,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
         }
         rcu_prepare_for_idle(smp_processor_id());
         /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-        smp_mb__before_atomic_inc(); /* See above. */
+        smp_mb__before_atomic(); /* See above. */
         atomic_inc(&rdtp->dynticks);
-        smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
+        smp_mb__after_atomic(); /* Force ordering with next sojourn. */
         WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 
         /*
@@ -507,10 +507,10 @@ void rcu_irq_exit(void)
 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
                                 int user)
 {
-        smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
+        smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
         atomic_inc(&rdtp->dynticks);
         /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-        smp_mb__after_atomic_inc(); /* See above. */
+        smp_mb__after_atomic(); /* See above. */
         WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
         rcu_cleanup_after_idle(smp_processor_id());
         trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
@@ -635,10 +635,10 @@ void rcu_nmi_enter(void)
             (atomic_read(&rdtp->dynticks) & 0x1))
                 return;
         rdtp->dynticks_nmi_nesting++;
-        smp_mb__before_atomic_inc(); /* Force delay from prior write. */
+        smp_mb__before_atomic(); /* Force delay from prior write. */
         atomic_inc(&rdtp->dynticks);
         /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-        smp_mb__after_atomic_inc(); /* See above. */
+        smp_mb__after_atomic(); /* See above. */
         WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 }
 
@@ -657,9 +657,9 @@ void rcu_nmi_exit(void)
             --rdtp->dynticks_nmi_nesting != 0)
                 return;
         /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-        smp_mb__before_atomic_inc(); /* See above. */
+        smp_mb__before_atomic(); /* See above. */
         atomic_inc(&rdtp->dynticks);
-        smp_mb__after_atomic_inc(); /* Force delay to next write. */
+        smp_mb__after_atomic(); /* Force delay to next write. */
         WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
@@ -2790,7 +2790,7 @@ void synchronize_sched_expedited(void)
                 s = atomic_long_read(&rsp->expedited_done);
                 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
                         /* ensure test happens before caller kfree */
-                        smp_mb__before_atomic_inc(); /* ^^^ */
+                        smp_mb__before_atomic(); /* ^^^ */
                         atomic_long_inc(&rsp->expedited_workdone1);
                         return;
                 }
@@ -2808,7 +2808,7 @@ void synchronize_sched_expedited(void)
                 s = atomic_long_read(&rsp->expedited_done);
                 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
                         /* ensure test happens before caller kfree */
-                        smp_mb__before_atomic_inc(); /* ^^^ */
+                        smp_mb__before_atomic(); /* ^^^ */
                         atomic_long_inc(&rsp->expedited_workdone2);
                         return;
                 }
@@ -2837,7 +2837,7 @@ void synchronize_sched_expedited(void)
                 s = atomic_long_read(&rsp->expedited_done);
                 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
                         /* ensure test happens before caller kfree */
-                        smp_mb__before_atomic_inc(); /* ^^^ */
+                        smp_mb__before_atomic(); /* ^^^ */
                         atomic_long_inc(&rsp->expedited_done_lost);
                         break;
                 }
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 962d1d589929..56db2f853e43 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2523,9 +2523,9 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
         /* Record start of fully idle period. */
         j = jiffies;
         ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
-        smp_mb__before_atomic_inc();
+        smp_mb__before_atomic();
         atomic_inc(&rdtp->dynticks_idle);
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
         WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
 }
 
@@ -2590,9 +2590,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
         }
 
         /* Record end of idle period. */
-        smp_mb__before_atomic_inc();
+        smp_mb__before_atomic();
         atomic_inc(&rdtp->dynticks_idle);
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
         WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
 
         /*
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 8b836b376d91..746bc9344969 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -165,7 +165,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
                  * do a write memory barrier, and then update the count, to
                  * make sure the vector is visible when count is set.
                  */
-                smp_mb__before_atomic_inc();
+                smp_mb__before_atomic();
                 atomic_inc(&(vec)->count);
                 do_mb = 1;
         }
@@ -185,14 +185,14 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
                  * the new priority vec.
                  */
                 if (do_mb)
-                        smp_mb__after_atomic_inc();
+                        smp_mb__after_atomic();
 
                 /*
                  * When removing from the vector, we decrement the counter first
                  * do a memory barrier and then clear the mask.
                  */
                 atomic_dec(&(vec)->count);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
                 cpumask_clear_cpu(cpu, vec->mask);
         }
 
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 7d50f794e248..0ffa20ae657b 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -394,7 +394,7 @@ EXPORT_SYMBOL(__wake_up_bit);
  *
  * In order for this to function properly, as it uses waitqueue_active()
  * internally, some kind of memory barrier must be done prior to calling
- * this. Typically, this will be smp_mb__after_clear_bit(), but in some
+ * this. Typically, this will be smp_mb__after_atomic(), but in some
  * cases where bitflags are manipulated non-atomically under a lock, one
  * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
  * because spin_unlock() does not guarantee a memory barrier.
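
For context on the kerneldoc comment in the hunk above: a caller that clears a bit and then calls wake_up_bit() needs a full barrier between the two, so the bit update cannot be reordered after the internal waitqueue_active() check. A minimal userspace C11 sketch of that ordering concern follows (names such as bit_busy and waiters are hypothetical, and plain C11 atomics merely stand in for the kernel primitives):

/*
 * Userspace C11 sketch of the lost-wakeup concern described in the
 * comment above (hypothetical names; plain C11 atomics stand in for
 * clear_bit()/smp_mb__after_atomic()/waitqueue_active()).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool bit_busy = true;     /* the bit being waited on */
static atomic_int  waiters;             /* waitqueue_active() analogue */

static void waker(void)
{
        /* clear_bit(): updates the flag but implies no ordering */
        atomic_store_explicit(&bit_busy, false, memory_order_relaxed);
        /* smp_mb__after_atomic(): full barrier before checking for waiters */
        atomic_thread_fence(memory_order_seq_cst);
        /* wake_up_bit() only does real work when the waitqueue is active */
        if (atomic_load_explicit(&waiters, memory_order_relaxed) != 0)
                puts("wake the waiters");
}

int main(void)
{
        waker();
        return 0;
}

Without the fence, the flag update and the waiter check could be reordered, and a waiter that had just queued itself would never be woken; that is the situation the comment warns about when only spin_unlock() (which is not a full barrier) separates the two steps.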