author    Will Deacon <will.deacon@arm.com>    2014-01-21 18:36:10 -0500
committer Ingo Molnar <mingo@kernel.org>       2014-01-28 07:13:28 -0500
commit    e207552e64ea053a33e856828ad7915484911d06
tree      cf3fcc21ce54d15edc8bf13373c786ed6511ee41 /include/linux/mcs_spinlock.h
parent    5faeb8adb956a5ad6579c4e309e8689943ad8294
locking/mcs: Allow architectures to hook in to contended paths
When contended, architectures may be able to reduce the polling overhead
in ways which aren't expressible using a simple relax() primitive.

This patch allows architectures to hook into the mcs_{lock,unlock}
functions for the contended cases only.

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/1390347370.3138.65.camel@schen9-DESK
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/mcs_spinlock.h')
-rw-r--r--	include/linux/mcs_spinlock.h	42
1 file changed, 28 insertions(+), 14 deletions(-)
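
For illustration only, here is a minimal sketch (not part of this patch) of how an architecture might override the new contended-path hooks from its own header before linux/mcs_spinlock.h is included. The cpu_wait_for_event()/cpu_send_event() helpers below are hypothetical stand-ins for an architecture's low-power wait/wake primitives, not real kernel APIs; the default acquire/release semantics of the generic macros are kept.

#ifndef _ASM_EXAMPLE_MCS_SPINLOCK_H
#define _ASM_EXAMPLE_MCS_SPINLOCK_H

/* Hypothetical arch override: wait for an event instead of busy-polling. */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))		/* acquire the hand-off */	\
		cpu_wait_for_event();		/* hypothetical low-power wait */ \
} while (0)

#define arch_mcs_spin_unlock_contended(l)				\
do {									\
	smp_store_release((l), 1);		/* publish the critical section */ \
	cpu_send_event();			/* hypothetical wake of the next waiter */ \
} while (0)

#endif /* _ASM_EXAMPLE_MCS_SPINLOCK_H */
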
diff --git a/include/linux/mcs_spinlock.h b/include/linux/mcs_spinlock.h
index 143fa428a857..e9a4d74c63dc 100644
--- a/include/linux/mcs_spinlock.h
+++ b/include/linux/mcs_spinlock.h
@@ -17,6 +17,28 @@ struct mcs_spinlock {
 	int locked; /* 1 if lock acquired */
 };
 
+#ifndef arch_mcs_spin_lock_contended
+/*
+ * Using smp_load_acquire() provides a memory barrier that ensures
+ * subsequent operations happen after the lock is acquired.
+ */
+#define arch_mcs_spin_lock_contended(l)					\
+do {									\
+	while (!(smp_load_acquire(l)))					\
+		arch_mutex_cpu_relax();					\
+} while (0)
+#endif
+
+#ifndef arch_mcs_spin_unlock_contended
+/*
+ * smp_store_release() provides a memory barrier to ensure all
+ * operations in the critical section has been completed before
+ * unlocking.
+ */
+#define arch_mcs_spin_unlock_contended(l)				\
+	smp_store_release((l), 1)
+#endif
+
 /*
  * Note: the smp_load_acquire/smp_store_release pair is not
  * sufficient to form a full memory barrier across
@@ -58,13 +80,9 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 		return;
 	}
 	ACCESS_ONCE(prev->next) = node;
-	/*
-	 * Wait until the lock holder passes the lock down.
-	 * Using smp_load_acquire() provides a memory barrier that
-	 * ensures subsequent operations happen after the lock is acquired.
-	 */
-	while (!(smp_load_acquire(&node->locked)))
-		arch_mutex_cpu_relax();
+
+	/* Wait until the lock holder passes the lock down. */
+	arch_mcs_spin_lock_contended(&node->locked);
 }
 
 /*
@@ -86,13 +104,9 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 		while (!(next = ACCESS_ONCE(node->next)))
 			arch_mutex_cpu_relax();
 	}
-	/*
-	 * Pass lock to next waiter.
-	 * smp_store_release() provides a memory barrier to ensure
-	 * all operations in the critical section has been completed
-	 * before unlocking.
-	 */
-	smp_store_release(&next->locked, 1);
+
+	/* Pass lock to next waiter. */
+	arch_mcs_spin_unlock_contended(&next->locked);
 }
 
 #endif /* __LINUX_MCS_SPINLOCK_H */
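
For context, a rough usage sketch of the mcs_spin_lock()/mcs_spin_unlock() API that these hooks sit behind (the demo_lock tail pointer and do_work() below are assumptions for illustration): each waiter enqueues its own node and, when contended, waits on its node->locked flag via arch_mcs_spin_lock_contended(), while the previous holder passes the lock on with arch_mcs_spin_unlock_contended().

#include <linux/mcs_spinlock.h>

static struct mcs_spinlock *demo_lock;		/* tail of the MCS queue (hypothetical) */

static void demo_critical_section(void)
{
	struct mcs_spinlock node;		/* this waiter's queue node */

	mcs_spin_lock(&demo_lock, &node);	/* contended path uses arch_mcs_spin_lock_contended() */
	do_work();				/* hypothetical critical section */
	mcs_spin_unlock(&demo_lock, &node);	/* hands off via arch_mcs_spin_unlock_contended() */
}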