author     Waiman Long <Waiman.Long@hp.com>   2015-04-24 14:56:30 -0400
committer  Ingo Molnar <mingo@kernel.org>     2015-05-08 06:36:25 -0400
commit     a33fda35e3a7655fb7df756ed67822afb5ed5e8d (patch)
tree       a146e440087aa927d6a9644cdf4514a04ed05e59
parent     663fdcbee0a656cdaef934e7f50e6c2670373bc9 (diff)
locking/qspinlock: Introduce a simple generic 4-byte queued spinlock
This patch introduces a new generic queued spinlock implementation that
can serve as an alternative to the default ticket spinlock. Compared with
the ticket spinlock, the queued spinlock should be almost as fair. It has
about the same speed in the single-thread case and it can be much faster
in high contention situations, especially when the spinlock is embedded
within the data structure to be protected. Only in light to moderate
contention, where the average queue depth is around 1-3, may this queued
spinlock be a bit slower due to the higher slowpath overhead.

This queued spinlock is especially suited to NUMA machines with a large
number of cores, as the chance of spinlock contention is much higher on
those machines. The cost of contention is also higher because of slower
inter-node memory traffic.

Because spinlocks are acquired with preemption disabled, a process will
not be migrated to another CPU while it is trying to get a spinlock.
Ignoring interrupt handling, a CPU can only be contending on one spinlock
at any one time. Counting soft IRQ, hard IRQ and NMI contexts, a CPU can
have a maximum of 4 concurrent lock-waiting activities. By allocating a
set of per-cpu queue nodes and using them to form a waiting queue, we can
encode the queue node address into a much smaller 24-bit value (including
CPU number and queue node index), leaving one byte for the lock. Note
that the queue node is only needed while waiting for the lock; once the
lock is acquired, the queue node can be released to be used later.

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-2-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
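As a stand-alone illustration of the tail encoding described above (not part of the
patch; the unprefixed constant names are simplified for the sketch), the following
user-space C program packs and unpacks a CPU number and nesting index the same way
the patch's encode_tail()/decode_tail() helpers do:

/*
 * Illustrative sketch only: pack (cpu, idx) into the upper 24 bits of a
 * 32-bit word, leaving the low byte for the lock value. The cpu number is
 * incremented by one so that "no tail" (0) is distinct from cpu 0, idx 0.
 */
#include <stdio.h>
#include <stdint.h>

#define LOCKED_BITS	8
#define TAIL_IDX_OFFSET	LOCKED_BITS
#define TAIL_IDX_BITS	2
#define TAIL_CPU_OFFSET	(TAIL_IDX_OFFSET + TAIL_IDX_BITS)

static uint32_t encode_tail(int cpu, int idx)
{
	return ((uint32_t)(cpu + 1) << TAIL_CPU_OFFSET) |
	       ((uint32_t)idx << TAIL_IDX_OFFSET);
}

static void decode_tail(uint32_t tail, int *cpu, int *idx)
{
	*cpu = (int)(tail >> TAIL_CPU_OFFSET) - 1;
	*idx = (tail >> TAIL_IDX_OFFSET) & ((1U << TAIL_IDX_BITS) - 1);
}

int main(void)
{
	int cpu, idx;
	uint32_t tail = encode_tail(5, 2);	/* e.g. CPU 5, third nesting level */

	decode_tail(tail, &cpu, &idx);
	printf("tail=0x%08x -> cpu=%d idx=%d\n", (unsigned)tail, cpu, idx);
	return 0;
}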
-rw-r--r--  include/asm-generic/qspinlock.h         132
-rw-r--r--  include/asm-generic/qspinlock_types.h    58
-rw-r--r--  kernel/Kconfig.locks                      7
-rw-r--r--  kernel/locking/Makefile                   1
-rw-r--r--  kernel/locking/mcs_spinlock.h             1
-rw-r--r--  kernel/locking/qspinlock.c              209
6 files changed, 408 insertions, 0 deletions
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
new file mode 100644
index 000000000000..569abcd47a9a
--- /dev/null
+++ b/include/asm-generic/qspinlock.h
@@ -0,0 +1,132 @@
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	return atomic_read(&lock->val);
}

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code to avoid lock stealing by the lockref
 *      code and change things underneath the lock. This also allows some
 *      optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	   (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * smp_mb__before_atomic() in order to guarantee release semantics
	 */
	smp_mb__before_atomic();
	atomic_sub(_Q_LOCKED_VAL, &lock->val);
}
#endif

/**
 * queued_spin_unlock_wait - wait until current lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();
}

/*
 * Initializer
 */
#define	__ARCH_SPIN_LOCK_UNLOCKED	{ ATOMIC_INIT(0) }

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
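For context, a hypothetical asm/qspinlock.h for a "foo" architecture opting in to this
generic header might look like the sketch below. This is an illustration only, not part
of the patch: the architecture name and its release-store unlock are assumptions here
(a later patch in this series gives x86-64 a very similar byte-store unlock), and the
byte store is only valid because the lock value lives in the low byte and waiters never
write to it.

/* arch/foo/include/asm/qspinlock.h -- hypothetical example, not in this patch */
#ifndef _ASM_FOO_QSPINLOCK_H
#define _ASM_FOO_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

#define queued_spin_unlock queued_spin_unlock
/*
 * With _Q_LOCKED_OFFSET == 0 the lock value occupies the least-significant
 * byte, so a single release store of 0 to that byte releases the lock
 * (assuming the architecture supports atomic byte stores).
 */
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release((u8 *)&lock->val, 0);
}

#include <asm-generic/qspinlock.h>

#endif /* _ASM_FOO_QSPINLOCK_H */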
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
new file mode 100644
index 000000000000..aec05c7ad2f6
--- /dev/null
+++ b/include/asm-generic/qspinlock_types.h
@@ -0,0 +1,58 @@
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H

/*
 * Including atomic.h with PARAVIRT on will cause compilation errors because
 * of recursive header file inclusion via paravirt_types.h. So don't include
 * it if PARAVIRT is on.
 */
#ifndef CONFIG_PARAVIRT
#include <linux/types.h>
#include <linux/atomic.h>
#endif

typedef struct qspinlock {
	atomic_t	val;
} arch_spinlock_t;

/*
 * Bitfields in the atomic value:
 *
 *  0- 7: locked byte
 *  8- 9: tail index
 * 10-31: tail cpu (+1)
 */
#define	_Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
				      << _Q_ ## type ## _OFFSET)
#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)

#define _Q_TAIL_IDX_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#define _Q_TAIL_IDX_BITS	2
#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)

#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)

#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
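A quick way to convince yourself that the bitfield layout above is self-consistent is to
check that the locked byte, tail index and tail cpu masks are disjoint and together cover
the full 32-bit word. The stand-alone C sketch below does that with compile-time asserts;
it is an editor's illustration rather than part of the patch, and the unprefixed constant
names are local to the sketch (the values mirror the _Q_* definitions).

#include <stdio.h>

#define LOCKED_OFFSET	0
#define LOCKED_BITS	8
#define TAIL_IDX_OFFSET	(LOCKED_OFFSET + LOCKED_BITS)
#define TAIL_IDX_BITS	2
#define TAIL_CPU_OFFSET	(TAIL_IDX_OFFSET + TAIL_IDX_BITS)
#define TAIL_CPU_BITS	(32 - TAIL_CPU_OFFSET)

#define MASK(off, bits)	(((1U << (bits)) - 1) << (off))

_Static_assert((MASK(LOCKED_OFFSET, LOCKED_BITS) &
		MASK(TAIL_IDX_OFFSET, TAIL_IDX_BITS)) == 0,
	       "locked byte overlaps tail index");
_Static_assert((MASK(TAIL_IDX_OFFSET, TAIL_IDX_BITS) &
		MASK(TAIL_CPU_OFFSET, TAIL_CPU_BITS)) == 0,
	       "tail index overlaps tail cpu");
_Static_assert((MASK(LOCKED_OFFSET, LOCKED_BITS) |
		MASK(TAIL_IDX_OFFSET, TAIL_IDX_BITS) |
		MASK(TAIL_CPU_OFFSET, TAIL_CPU_BITS)) == 0xffffffffU,
	       "the three fields do not cover all 32 bits");

int main(void)
{
	/* print the three masks: 0x000000ff, 0x00000300, 0xfffffc00 */
	printf("locked   mask = 0x%08x\n", MASK(LOCKED_OFFSET, LOCKED_BITS));
	printf("tail idx mask = 0x%08x\n", MASK(TAIL_IDX_OFFSET, TAIL_IDX_BITS));
	printf("tail cpu mask = 0x%08x\n", MASK(TAIL_CPU_OFFSET, TAIL_CPU_BITS));
	return 0;
}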
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 08561f1acd13..95fdad866a98 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -235,6 +235,13 @@ config LOCK_SPIN_ON_OWNER
 	def_bool y
 	depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER
 
+config ARCH_USE_QUEUED_SPINLOCK
+	bool
+
+config QUEUED_SPINLOCK
+	def_bool y if ARCH_USE_QUEUED_SPINLOCK
+	depends on SMP && !PARAVIRT_SPINLOCKS
+
 config ARCH_USE_QUEUE_RWLOCK
 	bool
 
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index de7a416cca2a..abfcef3c1ef9 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_SMP) += lglock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+obj-$(CONFIG_QUEUED_SPINLOCK) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 75e114bdf3f2..fd91aaa4554c 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -17,6 +17,7 @@
 struct mcs_spinlock {
 	struct mcs_spinlock *next;
 	int locked; /* 1 if lock acquired */
+	int count;  /* nesting count, see qspinlock.c */
 };
 
 #ifndef arch_mcs_spin_lock_contended
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
new file mode 100644
index 000000000000..029b51ce10ea
--- /dev/null
+++ b/kernel/locking/qspinlock.c
@@ -0,0 +1,209 @@
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <asm/qspinlock.h>

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description for this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock, however to make
 * it fit the 4 bytes we assume spinlock_t to be, and preserve its existing
 * API, we must modify it somehow.
 *
 * In particular; where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending (next->locked), we compress both these: {tail,
 * next->locked} into a single u32 value.
 *
 * Since a spinlock disables recursion of its own context and there is a limit
 * to the contexts that can nest; namely: task, softirq, hardirq, nmi. As there
 * are at most 4 nesting levels, it can be encoded by a 2-bit number. Now
 * we can encode the tail by combining the 2-bit nesting level with the cpu
 * number. With one byte for the lock value and 3 bytes for the tail, only a
 * 32-bit word is now needed. Even though we only need 1 bit for the lock,
 * we extend it to a full byte to achieve better performance for architectures
 * that support atomic byte write.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node; whereby avoiding the need to carry a node from lock to unlock, and
 * preserving existing lock API. This also makes the unlock code simpler and
 * faster.
 */

#include "mcs_spinlock.h"

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[4]);

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, lock value)
 *
 *              fast      :    slow                                  :    unlock
 *                        :                                          :
 * uncontended  (0,0)   --:--> (0,1) --------------------------------:--> (*,0)
 *                        :       | ^--------.                    /  :
 *                        :       v           \                   |  :
 * uncontended            :    (n,x) --+--> (n,0)                 |  :
 *   queue                :       | ^--'                          |  :
 *                        :       v                               |  :
 * contended              :    (*,x) --+--> (*,0) -----> (*,1) ---'  :
 *   queue                :         ^--'                             :
 *
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 new, old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;
	node->locked = 0;
	node->next = NULL;

	/*
	 * trylock || xchg(lock, node)
	 *
	 * 0,0 -> 0,1 ; no tail, not locked -> no tail, locked.
	 * p,x -> n,x ; tail was p -> tail is n; preserving locked.
	 */
	for (;;) {
		new = _Q_LOCKED_VAL;
		if (val)
			new = tail | (val & _Q_LOCKED_MASK);

		old = atomic_cmpxchg(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	/*
	 * we won the trylock; forget about queueing.
	 */
	if (new == _Q_LOCKED_VAL)
		goto release;

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & ~_Q_LOCKED_MASK) {
		prev = decode_tail(old);
		WRITE_ONCE(prev->next, node);

		arch_mcs_spin_lock_contended(&node->locked);
	}

	/*
	 * we're at the head of the waitqueue, wait for the owner to go away.
	 *
	 * *,x -> *,0
	 */
	while ((val = atomic_read(&lock->val)) & _Q_LOCKED_MASK)
		cpu_relax();

	/*
	 * claim the lock:
	 *
	 * n,0 -> 0,1 : lock, uncontended
	 * *,0 -> *,1 : lock, contended
	 */
	for (;;) {
		new = _Q_LOCKED_VAL;
		if (val != tail)
			new |= val;

		old = atomic_cmpxchg(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	/*
	 * contended path; wait for next, release.
	 */
	if (new != _Q_LOCKED_VAL) {
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();

		arch_mcs_spin_unlock_contended(&next->locked);
	}

release:
	/*
	 * release the node
	 */
	this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
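For readers who want the uncompressed reference point that the comment block in
qspinlock.c describes, below is a rough sketch of a classic MCS lock in portable C11
atomics. It is an illustration only; the names and memory orderings are this sketch's
assumptions, not the kernel's. Note how mcs_unlock() needs both the 8-byte tail pointer
and the caller's own node, which is exactly what the 32-bit (tail, locked) packing and
head-waiter spinning on the lock byte above are designed to avoid.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *_Atomic next;
	atomic_bool locked;		/* set true when our predecessor hands off */
};

struct mcs_lock {
	struct mcs_node *_Atomic tail;	/* NULL when nobody holds or waits */
};

static void mcs_lock(struct mcs_lock *l, struct mcs_node *me)
{
	struct mcs_node *prev;

	atomic_store_explicit(&me->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&me->locked, false, memory_order_relaxed);

	/* enqueue ourselves at the tail */
	prev = atomic_exchange_explicit(&l->tail, me, memory_order_acq_rel);
	if (prev) {
		/* there is a predecessor: link in and spin on our own node */
		atomic_store_explicit(&prev->next, me, memory_order_release);
		while (!atomic_load_explicit(&me->locked, memory_order_acquire))
			;
	}
	/* prev == NULL: lock acquired immediately */
}

static void mcs_unlock(struct mcs_lock *l, struct mcs_node *me)
{
	struct mcs_node *next = atomic_load_explicit(&me->next, memory_order_acquire);

	if (!next) {
		/* no known successor: try to swing the tail back to NULL */
		struct mcs_node *expected = me;

		if (atomic_compare_exchange_strong_explicit(&l->tail, &expected, NULL,
							    memory_order_release,
							    memory_order_relaxed))
			return;
		/* a successor is enqueueing; wait for it to link itself */
		while (!(next = atomic_load_explicit(&me->next, memory_order_acquire)))
			;
	}
	atomic_store_explicit(&next->locked, true, memory_order_release);
}

Each locking context supplies its own node, typically on the stack; the patch instead
draws nodes from a small per-CPU array indexed by the 2-bit nesting level, which is what
makes the compact tail encoding possible.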