author	Rusty Russell <rusty@rustcorp.com.au>	2012-05-07 23:59:45 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2012-05-29 23:28:41 -0400
commit	9dd6fa03ab31bb57cee4623a689d058d222fbe68 (patch)
tree	3f55c3527ecae7a053b0305d0a8f45e8328a416c
parent	ea022dfb3c2a4680483b00eb2fecc9fc4f6091d1 (diff)
lglock: remove online variants of lock
Optimizing the slow paths adds a lot of complexity.  If you need to grab
every lock often, you have other problems.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Nick Piggin <npiggin@kernel.dk>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--	include/linux/lglock.h	| 58
1 file changed, 2 insertions(+), 56 deletions(-)
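For orientation only (not part of the patch): lglock/brlock is a per-CPU "big-reader" lock in which readers take only their own CPU's spinlock while a writer takes every CPU's spinlock. The removed *_online variants restricted the writer to currently-online CPUs, tracked with a CPU-hotplug notifier and a cpumask; after this patch br_write_lock() maps straight to name##_global_lock(), which simply takes every CPU's lock with no online-CPU bookkeeping. Below is a minimal userspace sketch of that pattern, using pthreads and a fixed slot count as stand-ins for per-CPU data; the names (NR_SLOTS, local_lock, global_lock) are illustrative, not from the kernel.

/*
 * Illustrative userspace analogue of the lglock pattern -- NOT the
 * kernel implementation.  NR_SLOTS stands in for the set of CPUs.
 */
#include <pthread.h>

#define NR_SLOTS 8

static pthread_mutex_t slot_lock[NR_SLOTS] = {
	[0 ... NR_SLOTS - 1] = PTHREAD_MUTEX_INITIALIZER,	/* GCC range initializer */
};

/* analogue of br_read_lock()/br_read_unlock(): touch one slot only */
static void local_lock(int slot)
{
	pthread_mutex_lock(&slot_lock[slot]);
}

static void local_unlock(int slot)
{
	pthread_mutex_unlock(&slot_lock[slot]);
}

/*
 * analogue of br_write_lock() -> name##_global_lock() after this patch:
 * take every slot's lock, with no cpumask or notifier bookkeeping.
 */
static void global_lock(void)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++)
		pthread_mutex_lock(&slot_lock[i]);
}

static void global_unlock(void)
{
	int i;

	/* release in reverse order of acquisition */
	for (i = NR_SLOTS - 1; i >= 0; i--)
		pthread_mutex_unlock(&slot_lock[i]);
}

The global path is the slow path by design: the patch accepts taking a few extra uncontended locks there rather than keeping the hotplug-notifier machinery needed to track which CPUs are online.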
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 87f402ccec55..0fdd821e77b7 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -28,8 +28,8 @@
 #define br_lock_init(name)	name##_lock_init()
 #define br_read_lock(name)	name##_local_lock()
 #define br_read_unlock(name)	name##_local_unlock()
-#define br_write_lock(name)	name##_global_lock_online()
-#define br_write_unlock(name)	name##_global_unlock_online()
+#define br_write_lock(name)	name##_global_lock()
+#define br_write_unlock(name)	name##_global_unlock()
 
 #define DECLARE_BRLOCK(name)	DECLARE_LGLOCK(name)
 #define DEFINE_BRLOCK(name)	DEFINE_LGLOCK(name)
@@ -42,8 +42,6 @@
 #define lg_local_unlock_cpu(name, cpu)	name##_local_unlock_cpu(cpu)
 #define lg_global_lock(name)	name##_global_lock()
 #define lg_global_unlock(name)	name##_global_unlock()
-#define lg_global_lock_online(name) name##_global_lock_online()
-#define lg_global_unlock_online(name) name##_global_unlock_online()
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define LOCKDEP_INIT_MAP lockdep_init_map
@@ -68,36 +66,13 @@
  extern void name##_local_unlock_cpu(int cpu);	\
  extern void name##_global_lock(void);	\
  extern void name##_global_unlock(void);	\
- extern void name##_global_lock_online(void);	\
- extern void name##_global_unlock_online(void);	\
 
 #define DEFINE_LGLOCK(name)	\
 	\
  DEFINE_SPINLOCK(name##_cpu_lock);	\
- cpumask_t name##_cpus __read_mostly;	\
  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);	\
  DEFINE_LGLOCK_LOCKDEP(name);	\
 	\
- static int	\
- name##_lg_cpu_callback(struct notifier_block *nb,	\
-				unsigned long action, void *hcpu)	\
- {	\
-	switch (action & ~CPU_TASKS_FROZEN) {	\
-	case CPU_UP_PREPARE:	\
-		spin_lock(&name##_cpu_lock);	\
-		cpu_set((unsigned long)hcpu, name##_cpus);	\
-		spin_unlock(&name##_cpu_lock);	\
-		break;	\
-	case CPU_UP_CANCELED: case CPU_DEAD:	\
-		spin_lock(&name##_cpu_lock);	\
-		cpu_clear((unsigned long)hcpu, name##_cpus);	\
-		spin_unlock(&name##_cpu_lock);	\
-	}	\
-	return NOTIFY_OK;	\
- }	\
- static struct notifier_block name##_lg_cpu_notifier = {	\
-	.notifier_call = name##_lg_cpu_callback,	\
- };	\
  void name##_lock_init(void) {	\
 	int i;	\
 	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
@@ -106,11 +81,6 @@
 	lock = &per_cpu(name##_lock, i);	\
 	*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;	\
 	}	\
-	register_hotcpu_notifier(&name##_lg_cpu_notifier);	\
-	get_online_cpus();	\
-	for_each_online_cpu(i)	\
-		cpu_set(i, name##_cpus);	\
-	put_online_cpus();	\
  }	\
  EXPORT_SYMBOL(name##_lock_init);	\
 	\
@@ -150,30 +120,6 @@
  }	\
  EXPORT_SYMBOL(name##_local_unlock_cpu);	\
 	\
- void name##_global_lock_online(void) {	\
-	int i;	\
-	spin_lock(&name##_cpu_lock);	\
-	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);	\
-	for_each_cpu(i, &name##_cpus) {	\
-		arch_spinlock_t *lock;	\
-		lock = &per_cpu(name##_lock, i);	\
-		arch_spin_lock(lock);	\
-	}	\
- }	\
- EXPORT_SYMBOL(name##_global_lock_online);	\
-	\
- void name##_global_unlock_online(void) {	\
-	int i;	\
-	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);	\
-	for_each_cpu(i, &name##_cpus) {	\
-		arch_spinlock_t *lock;	\
-		lock = &per_cpu(name##_lock, i);	\
-		arch_spin_unlock(lock);	\
-	}	\
-	spin_unlock(&name##_cpu_lock);	\
- }	\
- EXPORT_SYMBOL(name##_global_unlock_online);	\
-	\
  void name##_global_lock(void) {	\
 	int i;	\
 	preempt_disable();	\