Diffstat (limited to 'kernel/locking/mcs_spinlock.c')
-rw-r--r-- | kernel/locking/mcs_spinlock.c | 64
1 file changed, 48 insertions(+), 16 deletions(-)
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index 838dc9e00669..be9ee1559fca 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -14,21 +14,47 @@
  * called from interrupt context and we have preemption disabled while
  * spinning.
  */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
+
+/*
+ * We use the value 0 to represent "no CPU", thus the encoded value
+ * will be the CPU number incremented by 1.
+ */
+static inline int encode_cpu(int cpu_nr)
+{
+	return cpu_nr + 1;
+}
+
+static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
+{
+	int cpu_nr = encoded_cpu_val - 1;
+
+	return per_cpu_ptr(&osq_node, cpu_nr);
+}
 
 /*
  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
  * Can return NULL in case we were the last queued and we updated @lock instead.
  */
-static inline struct optimistic_spin_queue *
-osq_wait_next(struct optimistic_spin_queue **lock,
-	      struct optimistic_spin_queue *node,
-	      struct optimistic_spin_queue *prev)
+static inline struct optimistic_spin_node *
+osq_wait_next(struct optimistic_spin_queue *lock,
+	      struct optimistic_spin_node *node,
+	      struct optimistic_spin_node *prev)
 {
-	struct optimistic_spin_queue *next = NULL;
+	struct optimistic_spin_node *next = NULL;
+	int curr = encode_cpu(smp_processor_id());
+	int old;
+
+	/*
+	 * If there is a prev node in queue, then the 'old' value will be
+	 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
+	 * we're currently last in queue, then the queue will then become empty.
+	 */
+	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
 
 	for (;;) {
-		if (*lock == node && cmpxchg(lock, node, prev) == node) {
+		if (atomic_read(&lock->tail) == curr &&
+		    atomic_cmpxchg(&lock->tail, curr, old) == curr) {
 			/*
 			 * We were the last queued, we moved @lock back. @prev
 			 * will now observe @lock and will complete its
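The hunk above replaces the pointer-sized queue tail with an encoded CPU number held in an atomic_t. The definitions it relies on live in the companion header change, which falls outside this diffstat-limited view; a minimal sketch of what that header would look like (names taken from the identifiers used in the hunk, so treat the exact layout as an approximation):

struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked;	/* 1 if lock acquired */
	int cpu;	/* encoded CPU # value */
};

struct optimistic_spin_queue {
	/*
	 * Stores an encoded value of the CPU # of the tail node in the
	 * queue. If the queue is empty, tail is set to OSQ_UNLOCKED_VAL.
	 */
	atomic_t tail;
};

#define OSQ_UNLOCKED_VAL (0)

With this layout the lock word is a single 4-byte atomic_t rather than a pointer, which is what lets osq_wait_next() compare and exchange plain integers. CPU 0 would otherwise encode to 0 and be indistinguishable from "unlocked", hence the +1 in encode_cpu().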
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_queue **lock,
 	return next;
 }
 
-bool osq_lock(struct optimistic_spin_queue **lock)
+bool osq_lock(struct optimistic_spin_queue *lock)
 {
-	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-	struct optimistic_spin_queue *prev, *next;
+	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
+	struct optimistic_spin_node *prev, *next;
+	int curr = encode_cpu(smp_processor_id());
+	int old;
 
 	node->locked = 0;
 	node->next = NULL;
+	node->cpu = curr;
 
-	node->prev = prev = xchg(lock, node);
-	if (likely(prev == NULL))
+	old = atomic_xchg(&lock->tail, curr);
+	if (old == OSQ_UNLOCKED_VAL)
 		return true;
 
+	prev = decode_cpu(old);
+	node->prev = prev;
 	ACCESS_ONCE(prev->next) = node;
 
 	/*
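The rewritten osq_lock() fast path publishes the current CPU as the new tail with a single atomic exchange; the returned old value tells it whether the queue was empty (take the lock) or occupied (queue behind the previous tail). A hypothetical userspace model of just this encoding and fast path, using C11 atomics in place of the kernel's atomic_t helpers (illustrative only, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

#define OSQ_UNLOCKED_VAL 0	/* 0 means "no CPU queued" */

static atomic_int tail = OSQ_UNLOCKED_VAL;	/* models lock->tail */

static int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;	/* +1 keeps CPU 0 distinct from "unlocked" */
}

/* Returns 1 when the lock is taken uncontended, 0 when we must queue. */
static int osq_lock_fastpath(int cpu)
{
	int curr = encode_cpu(cpu);

	/* Publish ourselves as the new tail; the previous tail value
	 * tells us whether anyone was already queued. */
	int old = atomic_exchange(&tail, curr);

	return old == OSQ_UNLOCKED_VAL;
}

int main(void)
{
	printf("CPU 2: %d\n", osq_lock_fastpath(2));	/* 1: queue was empty */
	printf("CPU 5: %d\n", osq_lock_fastpath(5));	/* 0: must spin behind CPU 2 */
	return 0;
}

In the kernel code the slow path then decodes the old tail back into a node pointer with decode_cpu() and links itself behind it, exactly as the xchg()-of-pointers version did.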
@@ -149,20 +180,21 @@ unqueue:
 	return false;
 }
 
-void osq_unlock(struct optimistic_spin_queue **lock)
+void osq_unlock(struct optimistic_spin_queue *lock)
 {
-	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-	struct optimistic_spin_queue *next;
+	struct optimistic_spin_node *node, *next;
+	int curr = encode_cpu(smp_processor_id());
 
 	/*
 	 * Fast path for the uncontended case.
 	 */
-	if (likely(cmpxchg(lock, node, NULL) == node))
+	if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
 		return;
 
 	/*
 	 * Second most likely case.
 	 */
+	node = this_cpu_ptr(&osq_node);
 	next = xchg(&node->next, NULL);
 	if (next) {
 		ACCESS_ONCE(next->locked) = 1;
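The unlock fast path succeeds only while the unlocking CPU is still the queue tail, i.e. nobody queued behind it; if another CPU has since exchanged itself into the tail, the cmpxchg fails and osq_unlock() falls through to hand the lock to the next node. A companion sketch to the model above showing that failure case (again a hedged userspace illustration with C11 atomics, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

#define OSQ_UNLOCKED_VAL 0

static atomic_int tail;		/* models lock->tail */

static int encode_cpu(int cpu_nr) { return cpu_nr + 1; }

int main(void)
{
	int curr = encode_cpu(2);		/* we are CPU 2 */

	atomic_store(&tail, curr);		/* we locked uncontended */
	atomic_store(&tail, encode_cpu(5));	/* CPU 5 queues behind us */

	/* The cmpxchg fails: the tail is no longer us, so the kernel
	 * code would fall through and wake the next queued node. */
	int expected = curr;
	int fast = atomic_compare_exchange_strong(&tail, &expected,
						  OSQ_UNLOCKED_VAL);
	printf("unlock fast path taken: %d\n", fast);	/* prints 0 */
	return 0;
}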