Diffstat (limited to 'include')
-rw-r--r--	include/linux/mcs_spinlock.h	112
-rw-r--r--	include/linux/mutex.h		  5
2 files changed, 115 insertions, 2 deletions
diff --git a/include/linux/mcs_spinlock.h b/include/linux/mcs_spinlock.h
new file mode 100644
index 000000000000..e9a4d74c63dc
--- /dev/null
+++ b/include/linux/mcs_spinlock.h
@@ -0,0 +1,112 @@
+/*
+ * MCS lock defines
+ *
+ * This file contains the main data structure and API definitions of the MCS lock.
+ *
+ * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
+ * with the desirable properties of being fair and of having each CPU spin
+ * on a local variable while waiting to acquire the lock.
+ * It avoids the expensive cache-line bouncing that common test-and-set
+ * spin-lock implementations incur.
+ */
+#ifndef __LINUX_MCS_SPINLOCK_H
+#define __LINUX_MCS_SPINLOCK_H
+
+struct mcs_spinlock {
+	struct mcs_spinlock *next;
+	int locked; /* 1 if lock acquired */
+};
+
+#ifndef arch_mcs_spin_lock_contended
+/*
+ * Using smp_load_acquire() provides a memory barrier that ensures
+ * subsequent operations happen after the lock is acquired.
+ */
+#define arch_mcs_spin_lock_contended(l)					\
+do {									\
+	while (!(smp_load_acquire(l)))					\
+		arch_mutex_cpu_relax();					\
+} while (0)
+#endif
+
+#ifndef arch_mcs_spin_unlock_contended
+/*
+ * smp_store_release() provides a memory barrier to ensure that all
+ * operations in the critical section have completed before the lock
+ * is released.
+ */
+#define arch_mcs_spin_unlock_contended(l)				\
+	smp_store_release((l), 1)
+#endif
+
+/*
+ * Note: the smp_load_acquire/smp_store_release pair is not sufficient
+ * to form a full memory barrier across CPUs on many architectures
+ * (x86 being an exception) for the mcs_unlock/mcs_lock sequence.
+ * Callers that need a full barrier across multiple CPUs for an
+ * mcs_unlock/mcs_lock pair should use smp_mb__after_unlock_lock()
+ * after mcs_lock.
+ */
+
+/*
+ * In order to acquire the lock, the caller should declare a local node and
+ * pass a reference to that node to this function in addition to the lock.
+ * If the lock has already been acquired, then this function spins on
+ * node->locked until the previous lock holder sets it in mcs_spin_unlock().
+ *
+ * We don't inline mcs_spin_lock() so that perf can correctly account for the
+ * time spent in this lock function.
+ */
+static inline
+void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+{
+	struct mcs_spinlock *prev;
+
+	/* Init node */
+	node->locked = 0;
+	node->next   = NULL;
+
+	prev = xchg(lock, node);
+	if (likely(prev == NULL)) {
+		/*
+		 * Lock acquired; there is no need to set node->locked to 1.
+		 * Threads spin only on their own node->locked value while
+		 * waiting for the lock.  Since this thread acquired the lock
+		 * immediately and never spins on its node->locked, that value
+		 * is never used.  If a debug mode needs to audit lock status,
+		 * set node->locked here.
+		 */
+		return;
+	}
+	ACCESS_ONCE(prev->next) = node;
+
+	/* Wait until the lock holder passes the lock down. */
+	arch_mcs_spin_lock_contended(&node->locked);
+}
+
+/*
+ * Releases the lock. The caller should pass in the corresponding node that
+ * was used to acquire the lock.
+ */
+static inline
+void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+{
+	struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+
+	if (likely(!next)) {
+		/*
+		 * Release the lock by setting it to NULL.
+		 */
+		if (likely(cmpxchg(lock, node, NULL) == node))
+			return;
+		/* Wait until the next pointer is set. */
+		while (!(next = ACCESS_ONCE(node->next)))
+			arch_mutex_cpu_relax();
+	}
+
+	/* Pass lock to next waiter. */
+	arch_mcs_spin_unlock_contended(&next->locked);
+}
+
+#endif /* __LINUX_MCS_SPINLOCK_H */
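For reference, a minimal usage sketch (not part of this patch): the lock word is just a struct mcs_spinlock pointer that starts out NULL, and each acquirer supplies its own node, typically on the stack, so contending CPUs spin only on their local node->locked. The demo_lock, demo_counter and demo_increment names below are hypothetical, chosen for illustration only:

static struct mcs_spinlock *demo_lock;		/* NULL means unlocked */
static unsigned long demo_counter;

static void demo_increment(void)
{
	struct mcs_spinlock node;		/* per-acquirer queue node */

	mcs_spin_lock(&demo_lock, &node);	/* queue up, spin on local node->locked */
	demo_counter++;				/* critical section */
	mcs_spin_unlock(&demo_lock, &node);	/* hand the lock to the next waiter */
}

As the note in the header points out, a caller that additionally needs the unlock/lock pair to act as a full barrier across CPUs would follow mcs_spin_lock() with smp_mb__after_unlock_lock().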
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index d3181936c138..c482e1d2cc49 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -46,6 +46,7 @@
  * - detects multi-task circular deadlocks and prints out all affected
  * locks and tasks (and only those tasks)
  */
+struct mcs_spinlock;
 struct mutex {
 	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
 	atomic_t count;
@@ -55,7 +56,7 @@ struct mutex {
 	struct task_struct *owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	void *spin_mlock;	/* Spinner MCS lock */
+	struct mcs_spinlock *mcs_lock;	/* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char *name;
@@ -179,4 +180,4 @@ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 # define arch_mutex_cpu_relax()	cpu_relax()
 #endif
 
-#endif
+#endif /* __LINUX_MUTEX_H */
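The matching kernel/locking/mutex.c update is outside this diff (the diffstat is limited to 'include'), so the following is only a hedged sketch of how the optimistic-spin path is expected to use the new mcs_lock field: spinners queue on lock->mcs_lock with an on-stack node so that only one task at a time busy-waits on the mutex owner. The function name is hypothetical.

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
static void mutex_spin_sketch(struct mutex *lock)
{
	struct mcs_spinlock node;		/* on-stack queue node */

	mcs_spin_lock(&lock->mcs_lock, &node);
	/* ... spin on lock->owner and try to take lock->count here ... */
	mcs_spin_unlock(&lock->mcs_lock, &node);
}
#endif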