author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500
---|---|---
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500
commit | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch) |
tree | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /include/linux/lglock.h |
parent | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff) |
Patched in Tegra support.
Diffstat (limited to 'include/linux/lglock.h')
-rw-r--r-- | include/linux/lglock.h | 192
1 file changed, 161 insertions, 31 deletions
```diff
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 0d24e932db0..87f402ccec5 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -23,48 +23,178 @@
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
-#include <linux/notifier.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
-#define br_lock_init(name)		lg_lock_init(name, #name)
-#define br_read_lock(name)		lg_local_lock(name)
-#define br_read_unlock(name)		lg_local_unlock(name)
-#define br_write_lock(name)		lg_global_lock(name)
-#define br_write_unlock(name)		lg_global_unlock(name)
+#define br_lock_init(name)		name##_lock_init()
+#define br_read_lock(name)		name##_local_lock()
+#define br_read_unlock(name)		name##_local_unlock()
+#define br_write_lock(name)		name##_global_lock_online()
+#define br_write_unlock(name)		name##_global_unlock_online()
 
-#define DEFINE_BRLOCK(name)		DEFINE_LGLOCK(name)
-#define DEFINE_STATIC_BRLOCK(name)	DEFINE_STATIC_LGLOCK(name)
+#define DECLARE_BRLOCK(name)		DECLARE_LGLOCK(name)
+#define DEFINE_BRLOCK(name)		DEFINE_LGLOCK(name)
+
+
+#define lg_lock_init(name)		name##_lock_init()
+#define lg_local_lock(name)		name##_local_lock()
+#define lg_local_unlock(name)		name##_local_unlock()
+#define lg_local_lock_cpu(name, cpu)	name##_local_lock_cpu(cpu)
+#define lg_local_unlock_cpu(name, cpu)	name##_local_unlock_cpu(cpu)
+#define lg_global_lock(name)		name##_global_lock()
+#define lg_global_unlock(name)		name##_global_unlock()
+#define lg_global_lock_online(name)	name##_global_lock_online()
+#define lg_global_unlock_online(name)	name##_global_unlock_online()
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define LOCKDEP_INIT_MAP	lockdep_init_map
+
+#define DEFINE_LGLOCK_LOCKDEP(name) \
+ struct lock_class_key name##_lock_key; \
+ struct lockdep_map name##_lock_dep_map; \
+ EXPORT_SYMBOL(name##_lock_dep_map)
+
 #else
 #define LOCKDEP_INIT_MAP(a, b, c, d)
-#endif
 
-struct lglock {
-	arch_spinlock_t __percpu *lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lock_class_key lock_key;
-	struct lockdep_map lock_dep_map;
+#define DEFINE_LGLOCK_LOCKDEP(name)
 #endif
-};
-
-#define DEFINE_LGLOCK(name) \
-	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
-	= __ARCH_SPIN_LOCK_UNLOCKED; \
-	struct lglock name = { .lock = &name ## _lock }
 
-#define DEFINE_STATIC_LGLOCK(name) \
-	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
-	= __ARCH_SPIN_LOCK_UNLOCKED; \
-	static struct lglock name = { .lock = &name ## _lock }
 
-void lg_lock_init(struct lglock *lg, char *name);
-void lg_local_lock(struct lglock *lg);
-void lg_local_unlock(struct lglock *lg);
-void lg_local_lock_cpu(struct lglock *lg, int cpu);
-void lg_local_unlock_cpu(struct lglock *lg, int cpu);
-void lg_global_lock(struct lglock *lg);
-void lg_global_unlock(struct lglock *lg);
+#define DECLARE_LGLOCK(name) \
+ extern void name##_lock_init(void); \
+ extern void name##_local_lock(void); \
+ extern void name##_local_unlock(void); \
+ extern void name##_local_lock_cpu(int cpu); \
+ extern void name##_local_unlock_cpu(int cpu); \
+ extern void name##_global_lock(void); \
+ extern void name##_global_unlock(void); \
+ extern void name##_global_lock_online(void); \
+ extern void name##_global_unlock_online(void); \
 
+#define DEFINE_LGLOCK(name) \
+ \
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+ cpumask_t name##_cpus __read_mostly; \
+ DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
+ DEFINE_LGLOCK_LOCKDEP(name); \
+ \
+ static int \
+ name##_lg_cpu_callback(struct notifier_block *nb, \
+				unsigned long action, void *hcpu) \
+ { \
+	switch (action & ~CPU_TASKS_FROZEN) { \
+	case CPU_UP_PREPARE: \
+		spin_lock(&name##_cpu_lock); \
+		cpu_set((unsigned long)hcpu, name##_cpus); \
+		spin_unlock(&name##_cpu_lock); \
+		break; \
+	case CPU_UP_CANCELED: case CPU_DEAD: \
+		spin_lock(&name##_cpu_lock); \
+		cpu_clear((unsigned long)hcpu, name##_cpus); \
+		spin_unlock(&name##_cpu_lock); \
+	} \
+	return NOTIFY_OK; \
+ } \
+ static struct notifier_block name##_lg_cpu_notifier = { \
+	.notifier_call = name##_lg_cpu_callback, \
+ }; \
+ void name##_lock_init(void) { \
+	int i; \
+	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+	for_each_possible_cpu(i) { \
+		arch_spinlock_t *lock; \
+		lock = &per_cpu(name##_lock, i); \
+		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
+	} \
+	register_hotcpu_notifier(&name##_lg_cpu_notifier); \
+	get_online_cpus(); \
+	for_each_online_cpu(i) \
+		cpu_set(i, name##_cpus); \
+	put_online_cpus(); \
+ } \
+ EXPORT_SYMBOL(name##_lock_init); \
+ \
+ void name##_local_lock(void) { \
+	arch_spinlock_t *lock; \
+	preempt_disable(); \
+	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
+	lock = &__get_cpu_var(name##_lock); \
+	arch_spin_lock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_lock); \
+ \
+ void name##_local_unlock(void) { \
+	arch_spinlock_t *lock; \
+	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
+	lock = &__get_cpu_var(name##_lock); \
+	arch_spin_unlock(lock); \
+	preempt_enable(); \
+ } \
+ EXPORT_SYMBOL(name##_local_unlock); \
+ \
+ void name##_local_lock_cpu(int cpu) { \
+	arch_spinlock_t *lock; \
+	preempt_disable(); \
+	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
+	lock = &per_cpu(name##_lock, cpu); \
+	arch_spin_lock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_lock_cpu); \
+ \
+ void name##_local_unlock_cpu(int cpu) { \
+	arch_spinlock_t *lock; \
+	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
+	lock = &per_cpu(name##_lock, cpu); \
+	arch_spin_unlock(lock); \
+	preempt_enable(); \
+ } \
+ EXPORT_SYMBOL(name##_local_unlock_cpu); \
+ \
+ void name##_global_lock_online(void) { \
+	int i; \
+	spin_lock(&name##_cpu_lock); \
+	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+	for_each_cpu(i, &name##_cpus) { \
+		arch_spinlock_t *lock; \
+		lock = &per_cpu(name##_lock, i); \
+		arch_spin_lock(lock); \
+	} \
+ } \
+ EXPORT_SYMBOL(name##_global_lock_online); \
+ \
+ void name##_global_unlock_online(void) { \
+	int i; \
+	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+	for_each_cpu(i, &name##_cpus) { \
+		arch_spinlock_t *lock; \
+		lock = &per_cpu(name##_lock, i); \
+		arch_spin_unlock(lock); \
+	} \
+	spin_unlock(&name##_cpu_lock); \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock_online); \
+ \
+ void name##_global_lock(void) { \
+	int i; \
+	preempt_disable(); \
+	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+	for_each_possible_cpu(i) { \
+		arch_spinlock_t *lock; \
+		lock = &per_cpu(name##_lock, i); \
+		arch_spin_lock(lock); \
+	} \
+ } \
+ EXPORT_SYMBOL(name##_global_lock); \
+ \
+ void name##_global_unlock(void) { \
+	int i; \
+	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+	for_each_possible_cpu(i) { \
+		arch_spinlock_t *lock; \
+		lock = &per_cpu(name##_lock, i); \
+		arch_spin_unlock(lock); \
+	} \
+	preempt_enable(); \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock);
 #endif
```
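For orientation, below is a minimal usage sketch of the macro-based brlock API that this diff restores. It is illustrative only and not part of the commit: the lock name `example_lock` and the surrounding functions are invented for the sketch; real in-tree users of this interface (historically, e.g., `vfsmount_lock` in the VFS) follow the same pattern.

```c
#include <linux/init.h>
#include <linux/lglock.h>

/*
 * Hypothetical lock, shown only to illustrate the restored API.
 * DEFINE_BRLOCK() expands to DEFINE_LGLOCK(): per-CPU arch_spinlock_t
 * instances, a cpumask of online CPUs maintained by a hotplug notifier,
 * and the example_lock_*() helpers behind the br_*() wrappers below.
 */
DEFINE_BRLOCK(example_lock);

static int __init example_lock_setup(void)
{
	/* name##_lock_init(): init per-CPU locks, register the hotplug notifier */
	br_lock_init(example_lock);
	return 0;
}
core_initcall(example_lock_setup);

static void example_read_side(void)
{
	/* name##_local_lock(): disable preemption, take only this CPU's lock */
	br_read_lock(example_lock);
	/* ... read-mostly critical section ... */
	br_read_unlock(example_lock);
}

static void example_write_side(void)
{
	/* name##_global_lock_online(): take every online CPU's lock in turn */
	br_write_lock(example_lock);
	/* ... exclusive critical section ... */
	br_write_unlock(example_lock);
}
```

The trade-off is visible in the diff itself: the read side touches only the local CPU's lock with preemption disabled, while the write side sweeps every online CPU's lock under `name##_cpu_lock`, the same spinlock the hotplug notifier takes when updating `name##_cpus`, which makes this API suited to data read far more often than it is modified.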