author	Peter Zijlstra <peterz@infradead.org>	2015-11-23 12:36:16 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-09-22 09:25:56 -0400
commit	d32cdbfb0ba319e44f75437afde868f7cafdc467 (patch)
tree	f268d6a9e22edbebdfb9701e10db788caa0b4151
parent	e6253970413d99f416f7de8bd516e5f1834d8216 (diff)
locking/lglock: Remove lglock implementation
It is now unused, remove it before someone else thinks it's a good
idea to use this.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	Documentation/locking/lglock.txt	166
-rw-r--r--	include/linux/lglock.h	76
-rw-r--r--	kernel/locking/Makefile	1
-rw-r--r--	kernel/locking/lglock.c	89
4 files changed, 0 insertions, 332 deletions
diff --git a/Documentation/locking/lglock.txt b/Documentation/locking/lglock.txt
deleted file mode 100644
index a6971e34fabe..000000000000
--- a/Documentation/locking/lglock.txt
+++ /dev/null
@@ -1,166 +0,0 @@
lglock - local/global locks for mostly local access patterns
------------------------------------------------------------

Origin: Nick Piggin's VFS scalability series introduced during
	2.6.35++ [1] [2]
Location: kernel/locking/lglock.c
	include/linux/lglock.h
Users: currently only the VFS and stop_machine related code

Design Goal:
------------

Improve scalability of globally used large data sets that are
distributed over all CPUs as per_cpu elements.

lglock is used to manage global data structures that are partitioned
over all CPUs as per_cpu elements but can mostly be handled by
CPU-local actions: the majority of accesses are CPU-local reads with
occasional CPU-local writes and very infrequent global write access.


* deal with things locally whenever possible
	- very fast access to the local per_cpu data
	- reasonably fast access to specific per_cpu data on a different
	  CPU
* while making global action possible when needed
	- by expensive access to all CPUs' locks - effectively
	  resulting in a globally visible critical section.

Design:
-------

Basically it is an array of per_cpu spinlocks, with lg_local_lock/unlock
accessing the local CPU's lock object and lg_local_lock_cpu/unlock_cpu
accessing a remote CPU's lock object. lg_local_lock has to disable
preemption as migration protection, so that the reference to the local
CPU's lock does not go out of scope. Because lg_local_lock/unlock only
touch cpu-local resources they are fast. Taking the local lock of a
different CPU is more expensive, but still relatively cheap.

One can relax the migration constraints by acquiring the current
CPU's lock with lg_local_lock_cpu, remembering the cpu, and releasing
that lock at the end of the critical section even if migrated. This
should give most of the performance benefits without inhibiting
migration, though it needs careful consideration of lglock nesting
and of deadlocks with lg_global_lock.
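
A minimal sketch of that remembered-cpu pattern (my_lglock is a
made-up name for illustration; note that the stock lg_local_lock_cpu
below still disables preemption across the section, so the relaxation
only pays off where that constraint is lifted, e.g. on -RT):

	int cpu = raw_smp_processor_id();	/* remember where we started */

	lg_local_lock_cpu(&my_lglock, cpu);
	/* ... critical section; even if the task could migrate, it
	   still releases the lock it actually took ... */
	lg_local_unlock_cpu(&my_lglock, cpu);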

The lg_global_lock/unlock pair locks all underlying spinlocks of all
possible CPUs (including those off-line). The preemption disable/enable
are needed in non-RT kernels to prevent deadlocks like:

			     on cpu 1

		     task A          task B
		lg_global_lock
		  got cpu 0 lock
			 <<<< preempt <<<<
				lg_local_lock_cpu for cpu 0
				spin on cpu 0 lock

On -RT this deadlock scenario is avoided by replacing the
arch_spin_locks in the lglocks with rt_mutexes, which resolve the
above deadlock by boosting the lock-holder.


Implementation:
---------------

The initial lglock implementation from Nick Piggin used some complex
macros to generate the lglock/brlock in lglock.h - they were later
turned into a set of functions by Andi Kleen [7]. The change to
functions was motivated by the presence of multiple lock users and by
functions being easier to maintain than the generating macros. This
change is also the basis for eliminating the restriction that lglocks
cannot be initialized in kernel modules (the remaining problem is that
the locks are not explicitly initialized - see lockdep-design.txt).

Declaration and initialization:
-------------------------------

  #include <linux/lglock.h>

  DEFINE_LGLOCK(name);
  or:
  DEFINE_STATIC_LGLOCK(name);

  lg_lock_init(&name, "lockdep_name_string");

  On UP this is mapped to DEFINE_SPINLOCK(name) in both cases. Note
  also that as of 3.18-rc6 all declarations in use are of the _STATIC_
  variant (and it seems that the non-static one was never in use).
  lg_lock_init only initializes the lockdep map.

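A minimal declaration sketch tying the pieces together (my_lglock,
my_counter and my_subsys_init are made-up names for illustration):

	#include <linux/init.h>
	#include <linux/lglock.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(long, my_counter);	/* the data being protected */
	DEFINE_STATIC_LGLOCK(my_lglock);

	/* runs once at boot; lglocks cannot live in modules anyway */
	static int __init my_subsys_init(void)
	{
		lg_lock_init(&my_lglock, "my_lglock");
		return 0;
	}
	core_initcall(my_subsys_init);
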
Usage:
------

From the locking semantics it is a spinlock. It could be called a
locality-aware spinlock. lg_local_* behaves like a per_cpu
spinlock and lg_global_* like a global spinlock.
No surprises in the API.

  lg_local_lock(*lglock);
    access to the protected per_cpu object on this CPU
  lg_local_unlock(*lglock);

  lg_local_lock_cpu(*lglock, cpu);
    access to the protected per_cpu object of another CPU <cpu>
  lg_local_unlock_cpu(*lglock, cpu);

  lg_global_lock(*lglock);
    access all protected per_cpu objects on all CPUs
  lg_global_unlock(*lglock);

  There are no _trylock variants of the lglocks.

Note that lg_global_lock/unlock have to iterate over all possible
CPUs rather than the actually present CPUs - otherwise a CPU could go
off-line with a held lock [4] - and that makes them very expensive. A
discussion of these issues can be found at [5].
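
As an illustrative sketch (reusing the hypothetical my_lglock and
my_counter from the declaration sketch above): a fast CPU-local
update paired with a rare, expensive global snapshot:

	static void my_counter_inc(void)
	{
		lg_local_lock(&my_lglock);	/* cheap: this CPU's lock only */
		__this_cpu_inc(my_counter);
		lg_local_unlock(&my_lglock);
	}

	static long my_counter_sum(void)	/* rare, slow path */
	{
		long sum = 0;
		int cpu;

		lg_global_lock(&my_lglock);	/* takes every possible CPU's lock */
		for_each_possible_cpu(cpu)
			sum += per_cpu(my_counter, cpu);
		lg_global_unlock(&my_lglock);
		return sum;
	}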

Constraints:
------------

  * currently the declaration of lglocks in kernel modules is not
    possible, though this should be doable with little change.
  * lglocks are not recursive.
  * suitable for code that can do most operations on the CPU-local
    data and will very rarely need the global lock
  * lg_global_lock/unlock is *very* expensive and does not scale
  * on UP systems all lg_* primitives are simply spinlocks
  * in PREEMPT_RT the spinlock becomes an rt-mutex and can sleep, but
    does not change the task's state while sleeping [6].
  * in PREEMPT_RT the preempt_disable/enable in lg_local_lock/unlock
    is downgraded to a migrate_disable/enable, the other
    preempt_disable/enable pairs are downgraded to barriers [6].
    The deadlock noted above for non-RT kernels is resolved by the
    rt_mutexes boosting the lock-holder, which arch_spin_locks do
    not do.

lglocks were designed for very specific problems in the VFS and are
probably only the right answer in those corner cases. Any new user
that looks at lglocks probably wants to look at the seqlock and RCU
alternatives as a first choice. There are also efforts to resolve the
RCU issues that currently prevent using RCU in place of the few
remaining lglocks.

Note on brlock history:
-----------------------

The 'Big Reader' read-write spinlocks were originally introduced by
Ingo Molnar in 2000 (2.4/2.5 kernel series) and removed in 2003. They
were later reintroduced by the VFS scalability patch set in the 2.6
series as the "big reader lock" brlock [2] variant of lglock, which
was replaced by seqlock primitives or by RCU based primitives in the
3.13 kernel series, as had been suggested in [3] in 2003. The brlock
was entirely removed in the 3.13 kernel series.

Link: 1 http://lkml.org/lkml/2010/8/2/81
Link: 2 http://lwn.net/Articles/401738/
Link: 3 http://lkml.org/lkml/2003/3/9/205
Link: 4 https://lkml.org/lkml/2011/8/24/185
Link: 5 http://lkml.org/lkml/2011/12/18/189
Link: 6 https://www.kernel.org/pub/linux/kernel/projects/rt/
        patch series - lglocks-rt.patch.patch
Link: 7 http://lkml.org/lkml/2012/3/5/26
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
deleted file mode 100644
index 0081f000e34b..000000000000
--- a/include/linux/lglock.h
+++ /dev/null
@@ -1,76 +0,0 @@
/*
 * Specialised local-global spinlock. Can only be declared as global variables
 * to avoid overhead and keep things simple (and we don't want to start using
 * these inside dynamically allocated structures).
 *
 * "local/global locks" (lglocks) can be used to:
 *
 * - Provide fast exclusive access to per-CPU data, with exclusive access to
 *   another CPU's data allowed but possibly subject to contention, and to
 *   provide very slow exclusive access to all per-CPU data.
 * - Or to provide very fast and scalable read serialisation, and to provide
 *   very slow exclusive serialisation of data (not necessarily per-CPU data).
 *
 * Brlocks are also implemented as a short-hand notation for the latter use
 * case.
 *
 * Copyright 2009, 2010, Nick Piggin, Novell Inc.
 */
#ifndef __LINUX_LGLOCK_H
#define __LINUX_LGLOCK_H

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#ifdef CONFIG_SMP

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define LOCKDEP_INIT_MAP lockdep_init_map
#else
#define LOCKDEP_INIT_MAP(a, b, c, d)
#endif

struct lglock {
	arch_spinlock_t __percpu *lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lock_class_key lock_key;
	struct lockdep_map lock_dep_map;
#endif
};

#define DEFINE_LGLOCK(name)						\
	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
	= __ARCH_SPIN_LOCK_UNLOCKED;					\
	struct lglock name = { .lock = &name ## _lock }

#define DEFINE_STATIC_LGLOCK(name)					\
	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
	= __ARCH_SPIN_LOCK_UNLOCKED;					\
	static struct lglock name = { .lock = &name ## _lock }

void lg_lock_init(struct lglock *lg, char *name);
void lg_local_lock(struct lglock *lg);
void lg_local_unlock(struct lglock *lg);
void lg_local_lock_cpu(struct lglock *lg, int cpu);
void lg_local_unlock_cpu(struct lglock *lg, int cpu);
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);

#else
/* When !CONFIG_SMP, map lglock to spinlock */
#define lglock spinlock
#define DEFINE_LGLOCK(name) DEFINE_SPINLOCK(name)
#define DEFINE_STATIC_LGLOCK(name) static DEFINE_SPINLOCK(name)
#define lg_lock_init(lg, name) spin_lock_init(lg)
#define lg_local_lock spin_lock
#define lg_local_unlock spin_unlock
#define lg_local_lock_cpu(lg, cpu) spin_lock(lg)
#define lg_local_unlock_cpu(lg, cpu) spin_unlock(lg)
#define lg_global_lock spin_lock
#define lg_global_unlock spin_unlock
#endif

#endif
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 31322a4275cd..6f88e352cd4f 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
 endif
 obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
-obj-$(CONFIG_SMP) += lglock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
diff --git a/kernel/locking/lglock.c b/kernel/locking/lglock.c
deleted file mode 100644
index 86ae2aebf004..000000000000
--- a/kernel/locking/lglock.c
+++ /dev/null
@@ -1,89 +0,0 @@
/* See include/linux/lglock.h for description */
#include <linux/module.h>
#include <linux/lglock.h>
#include <linux/cpu.h>
#include <linux/string.h>

/*
 * Note there is no uninit, so lglocks cannot be defined in
 * modules (but it's fine to use them from there)
 * Could be added though, just undo lg_lock_init
 */

void lg_lock_init(struct lglock *lg, char *name)
{
	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
}
EXPORT_SYMBOL(lg_lock_init);

void lg_local_lock(struct lglock *lg)
{
	arch_spinlock_t *lock;

	preempt_disable();
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	lock = this_cpu_ptr(lg->lock);
	arch_spin_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock);

void lg_local_unlock(struct lglock *lg)
{
	arch_spinlock_t *lock;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	lock = this_cpu_ptr(lg->lock);
	arch_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(lg_local_unlock);

void lg_local_lock_cpu(struct lglock *lg, int cpu)
{
	arch_spinlock_t *lock;

	preempt_disable();
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	lock = per_cpu_ptr(lg->lock, cpu);
	arch_spin_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock_cpu);

void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
	arch_spinlock_t *lock;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	lock = per_cpu_ptr(lg->lock, cpu);
	arch_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(lg_local_unlock_cpu);

void lg_global_lock(struct lglock *lg)
{
	int i;

	preempt_disable();
	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	for_each_possible_cpu(i) {
		arch_spinlock_t *lock;
		lock = per_cpu_ptr(lg->lock, i);
		arch_spin_lock(lock);
	}
}
EXPORT_SYMBOL(lg_global_lock);

void lg_global_unlock(struct lglock *lg)
{
	int i;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	for_each_possible_cpu(i) {
		arch_spinlock_t *lock;
		lock = per_cpu_ptr(lg->lock, i);
		arch_spin_unlock(lock);
	}
	preempt_enable();
}
EXPORT_SYMBOL(lg_global_unlock);