author     Waiman Long <Waiman.Long@hp.com>   2014-01-21 18:35:53 -0500
committer  Ingo Molnar <mingo@kernel.org>     2014-01-28 07:13:26 -0500
commit     aff7385b5a16bca6b8d9243f01a9ea5a5b411e1d (patch)
tree       ca8b03e4e6aa1d0fcd8ae971e98b0719fdb08666
parent     270750dbc18a71b23d660df110e433ff9616a2d4 (diff)
locking/mutexes/mcs: Correct barrier usage
This patch corrects the way memory barriers are used in the MCS lock with
the smp_load_acquire() and smp_store_release() functions. The previous
barriers could leak critical sections if the MCS lock is used by itself.
That is not a problem when the MCS lock is embedded in a mutex, but it
will be an issue when the MCS lock is used elsewhere.

The patch removes the incorrect barriers and puts in correct barriers
with the paired functions smp_load_acquire() and smp_store_release().

Suggested-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1390347353.3138.62.camel@schen9-DESK
Signed-off-by: Ingo Molnar <mingo@kernel.org>
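The acquire/release pairing introduced by this patch can be illustrated
outside the kernel. Below is a minimal sketch of a standalone MCS-style
lock using C11 atomics, where atomic_load_explicit(..., memory_order_acquire)
stands in for smp_load_acquire() and atomic_store_explicit(...,
memory_order_release) stands in for smp_store_release(). The mcs_node type
and function names here are illustrative assumptions, not the kernel's
mspin_* code, and the bare spin loops omit cpu_relax-style hints for brevity.

/*
 * Illustrative standalone MCS lock sketch using C11 atomics.
 * Not the kernel implementation; it only models the barrier pairing.
 */
#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_int locked;
};

static void mcs_lock(_Atomic(struct mcs_node *) *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);

	/* Queue ourselves at the tail; the exchange orders us behind any prior waiter. */
	prev = atomic_exchange_explicit(lock, node, memory_order_acq_rel);
	if (prev == NULL)
		return;		/* Lock was free; we now own it. */

	/* Publish our node to the previous waiter. */
	atomic_store_explicit(&prev->next, node, memory_order_release);

	/*
	 * Acquire load: once we observe locked == 1, everything the previous
	 * holder did before its release store is visible to us. This is the
	 * role smp_load_acquire() plays in the patched kernel code.
	 */
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;	/* spin */
}

static void mcs_unlock(_Atomic(struct mcs_node *) *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load_explicit(&node->next,
						     memory_order_acquire);

	if (next == NULL) {
		struct mcs_node *expected = node;

		/* No known successor; try to release the lock outright. */
		if (atomic_compare_exchange_strong_explicit(lock, &expected,
				NULL, memory_order_release,
				memory_order_relaxed))
			return;

		/* A successor is mid-enqueue; wait for it to appear. */
		while (!(next = atomic_load_explicit(&node->next,
						     memory_order_acquire)))
			;	/* spin */
	}

	/*
	 * Release store: everything done in our critical section is ordered
	 * before the successor's acquiring load observes locked == 1. This is
	 * the role smp_store_release() plays in the patched kernel code.
	 */
	atomic_store_explicit(&next->locked, 1, memory_order_release);
}

The key point the patch makes is visible in this pairing: the release store
in mcs_unlock() synchronizes with the acquire load in mcs_lock(), so the
critical section cannot leak past the handoff, which plain smp_wmb() placed
as before did not guarantee.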
-rw-r--r--  kernel/locking/mutex.c  18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4dd6e4c219de..fbbd2eda867e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -136,9 +136,12 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
 		return;
 	}
 	ACCESS_ONCE(prev->next) = node;
-	smp_wmb();
-	/* Wait until the lock holder passes the lock down */
-	while (!ACCESS_ONCE(node->locked))
+	/*
+	 * Wait until the lock holder passes the lock down.
+	 * Using smp_load_acquire() provides a memory barrier that
+	 * ensures subsequent operations happen after the lock is acquired.
+	 */
+	while (!(smp_load_acquire(&node->locked)))
 		arch_mutex_cpu_relax();
 }
 
@@ -156,8 +159,13 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
 		while (!(next = ACCESS_ONCE(node->next)))
 			arch_mutex_cpu_relax();
 	}
-	ACCESS_ONCE(next->locked) = 1;
-	smp_wmb();
+	/*
+	 * Pass lock to next waiter.
+	 * smp_store_release() provides a memory barrier to ensure
+	 * all operations in the critical section have been completed
+	 * before unlocking.
+	 */
+	smp_store_release(&next->locked, 1);
 }
 
 /*