-rw-r--r--  include/linux/mutex.h          |  4
-rw-r--r--  include/linux/osq_lock.h       | 19
-rw-r--r--  include/linux/rwsem.h          |  7
-rw-r--r--  kernel/locking/mcs_spinlock.c  | 46
-rw-r--r--  kernel/locking/mcs_spinlock.h  |  5
-rw-r--r--  kernel/locking/mutex.c         |  2
-rw-r--r--  kernel/locking/rwsem-xadd.c    |  2
7 files changed, 68 insertions(+), 17 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 885f3f56a77f..42aa9b9ecd5f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,7 @@
 #include <linux/lockdep.h>
 #include <linux/atomic.h>
 #include <asm/processor.h>
+#include <linux/osq_lock.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
-struct optimistic_spin_node;
 struct mutex {
 	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
 	atomic_t count;
@@ -56,7 +56,7 @@ struct mutex {
 	struct task_struct *owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	struct optimistic_spin_node *osq;	/* Spinner MCS lock */
+	struct optimistic_spin_queue osq;	/* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char *name;
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644
index 000000000000..b001682bf7cb
--- /dev/null
+++ b/include/linux/osq_lock.h
@@ -0,0 +1,19 @@
+#ifndef __LINUX_OSQ_LOCK_H
+#define __LINUX_OSQ_LOCK_H
+
+/*
+ * An MCS like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ */
+
+#define OSQ_UNLOCKED_VAL (0)
+
+struct optimistic_spin_queue {
+	/*
+	 * Stores an encoded value of the CPU # of the tail node in the queue.
+	 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
+	 */
+	atomic_t tail;
+};
+
+#endif
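The new header shrinks the OSQ lock word to a single atomic tail holding an encoded CPU number. For illustration only, a minimal user-space sketch of that encoding convention, with a plain int standing in for the kernel's atomic_t; the helper names mirror the encode_cpu()/decode_cpu() functions added to mcs_spinlock.c below, except that decode_cpu() here returns the raw CPU number rather than a per-CPU node pointer:

#include <stdio.h>

#define OSQ_UNLOCKED_VAL (0)

/* CPU numbers start at 0, so the stored value is cpu + 1;
 * OSQ_UNLOCKED_VAL (0) is then free to mean "queue empty". */
static int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static int decode_cpu(int encoded_cpu_val)
{
	return encoded_cpu_val - 1;
}

int main(void)
{
	int tail = OSQ_UNLOCKED_VAL;	/* stands in for the atomic_t tail */

	printf("queue empty: %d\n", tail == OSQ_UNLOCKED_VAL);	/* prints 1 */

	tail = encode_cpu(0);		/* CPU 0 becomes the tail */
	printf("tail=%d -> CPU %d\n", tail, decode_cpu(tail));	/* tail=1 -> CPU 0 */

	return 0;
}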
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index ba3f108ddea1..9fdcdd03507d 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -13,10 +13,9 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-
 #include <linux/atomic.h>
+#include <linux/osq_lock.h>
 
-struct optimistic_spin_node;
 struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -33,7 +32,7 @@ struct rw_semaphore {
 	 * if the owner is running on the cpu.
 	 */
 	struct task_struct *owner;
-	struct optimistic_spin_node *osq; /* spinner MCS lock */
+	struct optimistic_spin_queue osq; /* spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
@@ -70,7 +69,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 	  __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
 	  LIST_HEAD_INIT((name).wait_list), \
 	  NULL, /* owner */ \
-	  NULL /* mcs lock */ \
+	  { ATOMIC_INIT(OSQ_UNLOCKED_VAL) } /* osq */ \
 	  __RWSEM_DEP_MAP_INIT(name) }
 #else
 #define __RWSEM_INITIALIZER(name) \
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index e9866f70e828..32fc16c0a545 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -17,18 +17,44 @@
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
 
 /*
+ * We use the value 0 to represent "no CPU", thus the encoded value
+ * will be the CPU number incremented by 1.
+ */
+static inline int encode_cpu(int cpu_nr)
+{
+	return cpu_nr + 1;
+}
+
+static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
+{
+	int cpu_nr = encoded_cpu_val - 1;
+
+	return per_cpu_ptr(&osq_node, cpu_nr);
+}
+
+/*
  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
  * Can return NULL in case we were the last queued and we updated @lock instead.
  */
 static inline struct optimistic_spin_node *
-osq_wait_next(struct optimistic_spin_node **lock,
+osq_wait_next(struct optimistic_spin_queue *lock,
 	      struct optimistic_spin_node *node,
 	      struct optimistic_spin_node *prev)
 {
 	struct optimistic_spin_node *next = NULL;
+	int curr = encode_cpu(smp_processor_id());
+	int old;
+
+	/*
+	 * If there is a prev node in queue, then the 'old' value will be
+	 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
+	 * we're currently last in queue, then the queue will then become empty.
+	 */
+	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
 
 	for (;;) {
-		if (*lock == node && cmpxchg(lock, node, prev) == node) {
+		if (atomic_read(&lock->tail) == curr &&
+		    atomic_cmpxchg(&lock->tail, curr, old) == curr) {
 			/*
 			 * We were the last queued, we moved @lock back. @prev
 			 * will now observe @lock and will complete its
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_node **lock,
 	return next;
 }
 
-bool osq_lock(struct optimistic_spin_node **lock)
+bool osq_lock(struct optimistic_spin_queue *lock)
 {
 	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
 	struct optimistic_spin_node *prev, *next;
+	int curr = encode_cpu(smp_processor_id());
+	int old;
 
 	node->locked = 0;
 	node->next = NULL;
+	node->cpu = curr;
 
-	node->prev = prev = xchg(lock, node);
-	if (likely(prev == NULL))
+	old = atomic_xchg(&lock->tail, curr);
+	if (old == OSQ_UNLOCKED_VAL)
 		return true;
 
+	prev = decode_cpu(old);
+	node->prev = prev;
 	ACCESS_ONCE(prev->next) = node;
 
 	/*
@@ -149,15 +180,16 @@ unqueue:
 	return false;
 }
 
-void osq_unlock(struct optimistic_spin_node **lock)
+void osq_unlock(struct optimistic_spin_queue *lock)
 {
 	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
 	struct optimistic_spin_node *next;
+	int curr = encode_cpu(smp_processor_id());
 
 	/*
 	 * Fast path for the uncontended case.
 	 */
-	if (likely(cmpxchg(lock, node, NULL) == node))
+	if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
 		return;
 
 	/*
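The conversion above replaces pointer-sized xchg()/cmpxchg() on the lock word with atomic_xchg()/atomic_cmpxchg() on the 32-bit encoded tail. A simplified user-space sketch of just the two uncontended fast paths, using C11 atomics in place of the kernel primitives; the queueing, spinning and unqueue slow paths are deliberately not modelled:

#include <stdatomic.h>
#include <stdbool.h>

#define OSQ_UNLOCKED_VAL (0)

struct optimistic_spin_queue {
	atomic_int tail;	/* encoded CPU # of the tail node, 0 if empty */
};

static int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

/* Fast path of osq_lock(): swap our encoded CPU # into the tail.  If the
 * old value was OSQ_UNLOCKED_VAL the queue was empty and we hold the lock;
 * otherwise the real code queues behind the previous tail and spins. */
static bool osq_lock_fastpath(struct optimistic_spin_queue *lock, int cpu)
{
	int old = atomic_exchange(&lock->tail, encode_cpu(cpu));

	return old == OSQ_UNLOCKED_VAL;
}

/* Fast path of osq_unlock(): only succeeds if we are still the tail,
 * i.e. nobody queued behind us while we held the lock. */
static bool osq_unlock_fastpath(struct optimistic_spin_queue *lock, int cpu)
{
	int curr = encode_cpu(cpu);

	return atomic_compare_exchange_strong(&lock->tail, &curr,
					      OSQ_UNLOCKED_VAL);
}

int main(void)
{
	struct optimistic_spin_queue lock = { .tail = OSQ_UNLOCKED_VAL };

	if (osq_lock_fastpath(&lock, 0))	/* empty queue: acquired */
		osq_unlock_fastpath(&lock, 0);	/* still the tail: released */

	return 0;
}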
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index c99dc0052f49..74356dc0ce29 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -121,9 +121,10 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 struct optimistic_spin_node {
 	struct optimistic_spin_node *next, *prev;
 	int locked; /* 1 if lock acquired */
+	int cpu; /* encoded CPU # value */
 };
 
-extern bool osq_lock(struct optimistic_spin_node **lock);
-extern void osq_unlock(struct optimistic_spin_node **lock);
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
 
 #endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index bc73d33c6760..d9b313906caa 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -60,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	INIT_LIST_HEAD(&lock->wait_list);
 	mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	lock->osq = NULL;
+	atomic_set(&lock->osq.tail, OSQ_UNLOCKED_VAL);
 #endif
 
 	debug_mutex_init(lock, name, key);
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index c40c7d28661d..b77a6230bbf6 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -84,7 +84,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	INIT_LIST_HEAD(&sem->wait_list);
 #ifdef CONFIG_SMP
 	sem->owner = NULL;
-	atomic_set(&sem->osq.tail, OSQ_UNLOCKED_VAL);
 #endif
 }
 
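Because the osq field is now an embedded struct rather than a pointer, both the static initializer in rwsem.h and the runtime init paths in __mutex_init()/__init_rwsem() above set the tail to OSQ_UNLOCKED_VAL explicitly. A small sketch of the two initialization styles, again with C11 atomics standing in for atomic_t, ATOMIC_INIT() and atomic_set():

#include <stdatomic.h>
#include <stdio.h>

#define OSQ_UNLOCKED_VAL (0)

struct optimistic_spin_queue {
	atomic_int tail;
};

/* Compile-time initialization, as in the __RWSEM_INITIALIZER() change. */
static struct optimistic_spin_queue static_osq = {
	.tail = OSQ_UNLOCKED_VAL,
};

/* Runtime initialization, as in __mutex_init() and __init_rwsem(). */
static void osq_init(struct optimistic_spin_queue *osq)
{
	atomic_store(&osq->tail, OSQ_UNLOCKED_VAL);
}

int main(void)
{
	struct optimistic_spin_queue dynamic_osq;

	osq_init(&dynamic_osq);

	/* Both queues start out empty (tail == 0). */
	printf("%d %d\n", atomic_load(&static_osq.tail),
	       atomic_load(&dynamic_osq.tail));

	return 0;
}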