diff options
author    Peter Zijlstra <peterz@infradead.org>  2014-03-17 13:06:10 -0400
committer Ingo Molnar <mingo@kernel.org>         2014-04-18 08:20:48 -0400
commit    4e857c58efeb99393cba5a5d0d8ec7117183137c (patch)
tree      3f6fd464e4fddb2fe90374c075c9d06603cf8bbc /kernel/sched
parent    1b15611e1c30b37abe393d411c316cd659920bf5 (diff)
arch: Mass conversion of smp_mb__*()
Mostly scripted conversion of the smp_mb__* barriers.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-55dhyhocezdw1dg7u19hmh1u@git.kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/cpupri.c  6
-rw-r--r--  kernel/sched/wait.c    2
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 8b836b376d91..746bc9344969 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c | |||
@@ -165,7 +165,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) | |||
165 | * do a write memory barrier, and then update the count, to | 165 | * do a write memory barrier, and then update the count, to |
166 | * make sure the vector is visible when count is set. | 166 | * make sure the vector is visible when count is set. |
167 | */ | 167 | */ |
168 | smp_mb__before_atomic_inc(); | 168 | smp_mb__before_atomic(); |
169 | atomic_inc(&(vec)->count); | 169 | atomic_inc(&(vec)->count); |
170 | do_mb = 1; | 170 | do_mb = 1; |
171 | } | 171 | } |
@@ -185,14 +185,14 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) | |||
185 | * the new priority vec. | 185 | * the new priority vec. |
186 | */ | 186 | */ |
187 | if (do_mb) | 187 | if (do_mb) |
188 | smp_mb__after_atomic_inc(); | 188 | smp_mb__after_atomic(); |
189 | 189 | ||
190 | /* | 190 | /* |
191 | * When removing from the vector, we decrement the counter first | 191 | * When removing from the vector, we decrement the counter first |
192 | * do a memory barrier and then clear the mask. | 192 | * do a memory barrier and then clear the mask. |
193 | */ | 193 | */ |
194 | atomic_dec(&(vec)->count); | 194 | atomic_dec(&(vec)->count); |
195 | smp_mb__after_atomic_inc(); | 195 | smp_mb__after_atomic(); |
196 | cpumask_clear_cpu(cpu, vec->mask); | 196 | cpumask_clear_cpu(cpu, vec->mask); |
197 | } | 197 | } |
198 | 198 | ||
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 7d50f794e248..0ffa20ae657b 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c | |||
@@ -394,7 +394,7 @@ EXPORT_SYMBOL(__wake_up_bit); | |||
394 | * | 394 | * |
395 | * In order for this to function properly, as it uses waitqueue_active() | 395 | * In order for this to function properly, as it uses waitqueue_active() |
396 | * internally, some kind of memory barrier must be done prior to calling | 396 | * internally, some kind of memory barrier must be done prior to calling |
397 | * this. Typically, this will be smp_mb__after_clear_bit(), but in some | 397 | * this. Typically, this will be smp_mb__after_atomic(), but in some |
398 | * cases where bitflags are manipulated non-atomically under a lock, one | 398 | * cases where bitflags are manipulated non-atomically under a lock, one |
399 | * may need to use a less regular barrier, such fs/inode.c's smp_mb(), | 399 | * may need to use a less regular barrier, such fs/inode.c's smp_mb(), |
400 | * because spin_unlock() does not guarantee a memory barrier. | 400 | * because spin_unlock() does not guarantee a memory barrier. |