path: root/include
author	Andi Kleen <ak@linux.intel.com>	2012-05-08 00:02:24 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2012-05-29 23:28:41 -0400
commit	eea62f831b8030b0eeea8314eed73b6132d1de26 (patch)
tree	7fd58cf63e62484b8d3a0c8c5d2e9193b85945a4 /include
parent	9dd6fa03ab31bb57cee4623a689d058d222fbe68 (diff)
brlocks/lglocks: turn into functions
lglocks and brlocks are currently generated with some complicated macros in lglock.h. But there is no reason not to just use common utility functions and put all the data into a common data structure. Since there are at least two users, it makes sense to share this code in a library. This is also easier to maintain than a macro forest.

This will also make it possible later to dynamically allocate lglocks and to use them in modules (both would still need some additional, but now straightforward, code).

[akpm@linux-foundation.org: checkpatch fixes]

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
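For orientation, a minimal usage sketch of the interface after this change. DEFINE_LGLOCK(), struct lglock and the lg_* functions are taken from the patch below; the lock name my_lglock and the surrounding functions are hypothetical illustrations only.

/*
 * Usage sketch only. Assumption: a hypothetical lock called my_lglock;
 * the lg_* API itself comes from this patch.
 */
#include <linux/lglock.h>

DEFINE_LGLOCK(my_lglock);

static int my_subsys_init(void)
{
	/* sets up the lockdep class/map for this lock */
	lg_lock_init(&my_lglock, "my_lglock");
	return 0;
}

static void my_reader(void)
{
	/* read side: only this CPU's spinlock is taken, so it stays cheap */
	lg_local_lock(&my_lglock);
	/* ... read-mostly critical section ... */
	lg_local_unlock(&my_lglock);
}

static void my_writer(void)
{
	/* write side: every CPU's spinlock is taken, excluding all readers */
	lg_global_lock(&my_lglock);
	/* ... rare write-side critical section ... */
	lg_global_unlock(&my_lglock);
}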
Diffstat (limited to 'include')
-rw-r--r--	include/linux/lglock.h	125
1 file changed, 26 insertions(+), 99 deletions(-)
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 0fdd821e77b..f01e5f6d1f0 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -23,26 +23,17 @@
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
+#include <linux/notifier.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
-#define br_lock_init(name)	name##_lock_init()
-#define br_read_lock(name)	name##_local_lock()
-#define br_read_unlock(name)	name##_local_unlock()
-#define br_write_lock(name)	name##_global_lock()
-#define br_write_unlock(name)	name##_global_unlock()
+#define br_lock_init(name)	lg_lock_init(name, #name)
+#define br_read_lock(name)	lg_local_lock(name)
+#define br_read_unlock(name)	lg_local_unlock(name)
+#define br_write_lock(name)	lg_global_lock(name)
+#define br_write_unlock(name)	lg_global_unlock(name)
 
-#define DECLARE_BRLOCK(name)	DECLARE_LGLOCK(name)
 #define DEFINE_BRLOCK(name)	DEFINE_LGLOCK(name)
 
-
-#define lg_lock_init(name)	name##_lock_init()
-#define lg_local_lock(name)	name##_local_lock()
-#define lg_local_unlock(name)	name##_local_unlock()
-#define lg_local_lock_cpu(name, cpu)	name##_local_lock_cpu(cpu)
-#define lg_local_unlock_cpu(name, cpu)	name##_local_unlock_cpu(cpu)
-#define lg_global_lock(name)	name##_global_lock()
-#define lg_global_unlock(name)	name##_global_unlock()
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define LOCKDEP_INIT_MAP	lockdep_init_map
 
@@ -57,90 +48,26 @@
 #define DEFINE_LGLOCK_LOCKDEP(name)
 #endif
 
-
-#define DECLARE_LGLOCK(name) \
- extern void name##_lock_init(void); \
- extern void name##_local_lock(void); \
- extern void name##_local_unlock(void); \
- extern void name##_local_lock_cpu(int cpu); \
- extern void name##_local_unlock_cpu(int cpu); \
- extern void name##_global_lock(void); \
- extern void name##_global_unlock(void); \
-
-#define DEFINE_LGLOCK(name) \
- \
- DEFINE_SPINLOCK(name##_cpu_lock); \
- DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
- DEFINE_LGLOCK_LOCKDEP(name); \
- \
- void name##_lock_init(void) { \
-	int i; \
-	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
-	for_each_possible_cpu(i) { \
-		arch_spinlock_t *lock; \
-		lock = &per_cpu(name##_lock, i); \
-		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
-	} \
- } \
- EXPORT_SYMBOL(name##_lock_init); \
- \
- void name##_local_lock(void) { \
-	arch_spinlock_t *lock; \
-	preempt_disable(); \
-	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
-	lock = &__get_cpu_var(name##_lock); \
-	arch_spin_lock(lock); \
- } \
- EXPORT_SYMBOL(name##_local_lock); \
- \
- void name##_local_unlock(void) { \
-	arch_spinlock_t *lock; \
-	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
-	lock = &__get_cpu_var(name##_lock); \
-	arch_spin_unlock(lock); \
-	preempt_enable(); \
- } \
- EXPORT_SYMBOL(name##_local_unlock); \
- \
- void name##_local_lock_cpu(int cpu) { \
-	arch_spinlock_t *lock; \
-	preempt_disable(); \
-	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
-	lock = &per_cpu(name##_lock, cpu); \
-	arch_spin_lock(lock); \
- } \
- EXPORT_SYMBOL(name##_local_lock_cpu); \
- \
- void name##_local_unlock_cpu(int cpu) { \
-	arch_spinlock_t *lock; \
-	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
-	lock = &per_cpu(name##_lock, cpu); \
-	arch_spin_unlock(lock); \
-	preempt_enable(); \
- } \
- EXPORT_SYMBOL(name##_local_unlock_cpu); \
- \
- void name##_global_lock(void) { \
-	int i; \
-	preempt_disable(); \
-	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
-	for_each_possible_cpu(i) { \
-		arch_spinlock_t *lock; \
-		lock = &per_cpu(name##_lock, i); \
-		arch_spin_lock(lock); \
-	} \
- } \
- EXPORT_SYMBOL(name##_global_lock); \
- \
- void name##_global_unlock(void) { \
-	int i; \
-	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
-	for_each_possible_cpu(i) { \
-		arch_spinlock_t *lock; \
-		lock = &per_cpu(name##_lock, i); \
-		arch_spin_unlock(lock); \
-	} \
-	preempt_enable(); \
- } \
- EXPORT_SYMBOL(name##_global_unlock);
+struct lglock {
+	arch_spinlock_t __percpu *lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lock_class_key lock_key;
+	struct lockdep_map    lock_dep_map;
+#endif
+};
+
+#define DEFINE_LGLOCK(name) \
+	DEFINE_LGLOCK_LOCKDEP(name); \
+	DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+	= __ARCH_SPIN_LOCK_UNLOCKED; \
+	struct lglock name = { .lock = &name ## _lock }
+
+void lg_lock_init(struct lglock *lg, char *name);
+void lg_local_lock(struct lglock *lg);
+void lg_local_unlock(struct lglock *lg);
+void lg_local_lock_cpu(struct lglock *lg, int cpu);
+void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+void lg_global_lock(struct lglock *lg);
+void lg_global_unlock(struct lglock *lg);
+
 #endif
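The header now only declares the lg_* helpers; their bodies move into a shared library file that falls outside this diffstat (limited to 'include'). As a rough sketch of what the out-of-line read-side and write-side locks look like, the removed macro bodies can be transcribed onto the new struct lglock. The use of this_cpu_ptr()/per_cpu_ptr() for the __percpu pointer is an assumption here, as is the placement of the code.

/*
 * Sketch only: the real implementation is not part of this diff.
 * Logic follows the removed name##_local_lock()/name##_global_lock()
 * macro bodies, reworked to take a struct lglock * argument.
 */
#include <linux/export.h>
#include <linux/lglock.h>

void lg_local_lock(struct lglock *lg)
{
	arch_spinlock_t *lock;

	preempt_disable();
	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
	lock = this_cpu_ptr(lg->lock);		/* only this CPU's spinlock */
	arch_spin_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock);

void lg_global_lock(struct lglock *lg)
{
	int i;

	preempt_disable();
	rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
	for_each_possible_cpu(i) {
		arch_spinlock_t *lock;
		lock = per_cpu_ptr(lg->lock, i);	/* take every CPU's lock */
		arch_spin_lock(lock);
	}
}
EXPORT_SYMBOL(lg_global_lock);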