author     Jason Low <jason.low2@hp.com>    2014-01-21 18:36:05 -0500
committer  Ingo Molnar <mingo@kernel.org>   2014-01-28 07:13:28 -0500
commit     5faeb8adb956a5ad6579c4e309e8689943ad8294 (patch)
tree       7b80bcdd7e59b323d5a7b72f634ca5e6dee2017a /include
parent     e72246748ff006ab928bc774e276e6ef5542f9c5 (diff)
locking/mcs: Micro-optimize the MCS code, add extra comments
Remove the unnecessary operation that assigns locked status to 1 when the
lock is acquired without contention: the lock holder never checks its own
lock status again once the lock is acquired, and lock contenders do not
look at the lock holder's lock status either.

Mark the cmpxchg(lock, node, NULL) == node check in mcs_spin_unlock() as
likely(), since most of the time no race occurs there.

Also add more comments describing how the local node is used in MCS locks.

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/1390347365.3138.64.camel@schen9-DESK
Signed-off-by: Ingo Molnar <mingo@kernel.org>
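For context, the two fast paths this patch touches can be sketched outside
the kernel. The following is a minimal, self-contained model using C11
atomics, not the kernel code: atomic_exchange() and
atomic_compare_exchange_strong() stand in for xchg() and cmpxchg(),
atomic loads/stores stand in for ACCESS_ONCE(), an empty spin stands in
for arch_mutex_cpu_relax(), and likely() is modeled with the GCC/Clang
__builtin_expect() builtin. The uncontended acquire returns without ever
writing node->locked, and the uncontended release is marked likely(),
mirroring the two changes described above.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define likely(x) __builtin_expect(!!(x), 1)

struct mcs_node {
	struct mcs_node *_Atomic next;
	atomic_bool locked;
};

/* The lock is just an atomic pointer to the tail of the waiter queue. */
typedef struct mcs_node *_Atomic mcs_lock_t;

static void mcs_lock(mcs_lock_t *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	/* Relaxed init is fine: the node is published by the exchange below. */
	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, false, memory_order_relaxed);

	/* Queue ourselves at the tail (xchg() in the kernel version). */
	prev = atomic_exchange(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Uncontended fast path: no one will ever read our
		 * node->locked, so (as in this patch) we skip writing it.
		 */
		return;
	}
	/* Link in behind the previous tail, then spin on our own flag. */
	atomic_store(&prev->next, node);
	while (!atomic_load(&node->locked))
		; /* arch_mutex_cpu_relax() in the kernel */
}

static void mcs_unlock(mcs_lock_t *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load(&node->next);

	if (next == NULL) {
		struct mcs_node *expected = node;

		/* No visible successor: try to swing the tail back to NULL. */
		if (likely(atomic_compare_exchange_strong(lock, &expected, NULL)))
			return;
		/* A successor is mid-enqueue; wait for it to link itself in. */
		while (!(next = atomic_load(&node->next)))
			;
	}
	/* Pass the lock down to the successor. */
	atomic_store(&next->locked, true);
}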
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mcs_spinlock.h | 27
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/include/linux/mcs_spinlock.h b/include/linux/mcs_spinlock.h
index 9578ef81940b..143fa428a857 100644
--- a/include/linux/mcs_spinlock.h
+++ b/include/linux/mcs_spinlock.h
@@ -25,6 +25,17 @@ struct mcs_spinlock {
  * with mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock() should be
  * used after mcs_lock.
  */
+
+/*
+ * In order to acquire the lock, the caller should declare a local node and
+ * pass a reference of the node to this function in addition to the lock.
+ * If the lock has already been acquired, then this will proceed to spin
+ * on this node->locked until the previous lock holder sets the node->locked
+ * in mcs_spin_unlock().
+ *
+ * We don't inline mcs_spin_lock() so that perf can correctly account for the
+ * time spent in this lock function.
+ */
 static inline
 void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 {
@@ -36,8 +47,14 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 
 	prev = xchg(lock, node);
 	if (likely(prev == NULL)) {
-		/* Lock acquired */
-		node->locked = 1;
+		/*
+		 * Lock acquired, don't need to set node->locked to 1. Threads
+		 * only spin on its own node->locked value for lock acquisition.
+		 * However, since this thread can immediately acquire the lock
+		 * and does not proceed to spin on its own node->locked, this
+		 * value won't be used. If a debug mode is needed to
+		 * audit lock status, then set node->locked value here.
+		 */
 		return;
 	}
 	ACCESS_ONCE(prev->next) = node;
@@ -50,6 +67,10 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 		arch_mutex_cpu_relax();
 }
 
+/*
+ * Releases the lock. The caller should pass in the corresponding node that
+ * was used to acquire the lock.
+ */
 static inline
 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 {
@@ -59,7 +80,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 	/*
 	 * Release the lock by setting it to NULL
 	 */
-	if (cmpxchg(lock, node, NULL) == node)
+	if (likely(cmpxchg(lock, node, NULL) == node))
 		return;
 	/* Wait until the next pointer is set */
 	while (!(next = ACCESS_ONCE(node->next)))
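As the new comment above mcs_spin_lock() describes, the caller declares a
local node and passes the same node to both the lock and unlock calls. A
hypothetical caller might look like the sketch below (example_lock and
example_critical_section are illustrative names, not part of this patch;
only the mcs_spin_lock()/mcs_spin_unlock() signatures come from the code
above):

#include <linux/mcs_spinlock.h>

static struct mcs_spinlock *example_lock;	/* NULL == unlocked */

static void example_critical_section(void)
{
	struct mcs_spinlock node;	/* local node, one per acquisition */

	mcs_spin_lock(&example_lock, &node);
	/* ... critical section ... */
	mcs_spin_unlock(&example_lock, &node);
}

Because each waiter spins only on its own stack-resident node->locked,
contending CPUs do not bounce a shared cache line while waiting, which is
the main point of MCS locks and why the uncontended path never needs to
write node->locked at all.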