author      Linus Torvalds <torvalds@linux-foundation.org>   2011-12-24 00:47:28 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>   2011-12-24 00:47:28 -0500
commit      a22681fabb1564d00d54e804ec95ba9330d857ed (patch)
tree        cd1dd7d3027a0a1c25506e37c32579bc388c4287 /include
parent      6d451c578c7222c7e9305c2e776a654dc6ec06c3 (diff)
parent      e30e2fdfe56288576ee9e04dbb06b4bd5f282203 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
VFS: Fix race between CPU hotplug and lglocks
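
As the diff below shows, the race is between CPU hotplug and the name##_global_lock_online()/name##_global_unlock_online() pair: with only preempt_disable() held, the set of CPUs seen by for_each_online_cpu() can change between the pass that takes every per-CPU spinlock and the pass that releases them (for example, a CPU coming online in between), so the unlock pass can touch a per-CPU lock that was never taken, or leave one held. The fix records the participating CPUs in a per-lock cpumask kept up to date by a CPU-hotplug notifier, and holds an ordinary spinlock around both the mask updates and the whole online lock/unlock window. A rough, non-macro restatement of the fixed pair for a hypothetical lock called my_lock (the real code is generated by DEFINE_LGLOCK() in the diff below; lockdep annotations and the init path that sets each per-CPU lock to __ARCH_SPIN_LOCK_UNLOCKED are omitted here):

/*
 * Sketch only: my_lock_cpus is kept in sync by a CPU-hotplug notifier that
 * sets/clears bits under my_lock_cpu_lock, exactly as the patch does for
 * name##_cpus / name##_cpu_lock.
 */
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

static DEFINE_SPINLOCK(my_lock_cpu_lock);      /* guards my_lock_cpus */
static cpumask_t my_lock_cpus __read_mostly;   /* CPUs whose locks we take */
static DEFINE_PER_CPU(arch_spinlock_t, my_lock_lock);

void my_lock_global_lock_online(void)
{
        int i;

        /* Pin the CPU set: a concurrent hotplug notifier spins on this lock. */
        spin_lock(&my_lock_cpu_lock);
        for_each_cpu(i, &my_lock_cpus)
                arch_spin_lock(&per_cpu(my_lock_lock, i));
}

void my_lock_global_unlock_online(void)
{
        int i;

        /* Same CPU set as the lock side, since my_lock_cpu_lock is still held. */
        for_each_cpu(i, &my_lock_cpus)
                arch_spin_unlock(&per_cpu(my_lock_lock, i));
        spin_unlock(&my_lock_cpu_lock);
}

Because my_lock_cpu_lock stays held from the lock side to the unlock side, the notifier cannot change my_lock_cpus in between, so both loops walk exactly the same CPUs.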
Diffstat (limited to 'include')
-rw-r--r--   include/linux/lglock.h   36
1 file changed, 32 insertions(+), 4 deletions(-)
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index f549056fb20b..87f402ccec55 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
 #define br_lock_init(name)	name##_lock_init()
@@ -72,9 +73,31 @@
 
 #define DEFINE_LGLOCK(name) \
  \
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+ cpumask_t name##_cpus __read_mostly; \
  DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
  DEFINE_LGLOCK_LOCKDEP(name); \
  \
+ static int \
+ name##_lg_cpu_callback(struct notifier_block *nb, \
+                        unsigned long action, void *hcpu) \
+ { \
+        switch (action & ~CPU_TASKS_FROZEN) { \
+        case CPU_UP_PREPARE: \
+                spin_lock(&name##_cpu_lock); \
+                cpu_set((unsigned long)hcpu, name##_cpus); \
+                spin_unlock(&name##_cpu_lock); \
+                break; \
+        case CPU_UP_CANCELED: case CPU_DEAD: \
+                spin_lock(&name##_cpu_lock); \
+                cpu_clear((unsigned long)hcpu, name##_cpus); \
+                spin_unlock(&name##_cpu_lock); \
+        } \
+        return NOTIFY_OK; \
+ } \
+ static struct notifier_block name##_lg_cpu_notifier = { \
+        .notifier_call = name##_lg_cpu_callback, \
+ }; \
  void name##_lock_init(void) { \
         int i; \
         LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
@@ -83,6 +106,11 @@
                 lock = &per_cpu(name##_lock, i); \
                 *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
         } \
+        register_hotcpu_notifier(&name##_lg_cpu_notifier); \
+        get_online_cpus(); \
+        for_each_online_cpu(i) \
+                cpu_set(i, name##_cpus); \
+        put_online_cpus(); \
  } \
  EXPORT_SYMBOL(name##_lock_init); \
  \
@@ -124,9 +152,9 @@
  \
  void name##_global_lock_online(void) { \
         int i; \
-        preempt_disable(); \
+        spin_lock(&name##_cpu_lock); \
         rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
-        for_each_online_cpu(i) { \
+        for_each_cpu(i, &name##_cpus) { \
                 arch_spinlock_t *lock; \
                 lock = &per_cpu(name##_lock, i); \
                 arch_spin_lock(lock); \
@@ -137,12 +165,12 @@
  void name##_global_unlock_online(void) { \
         int i; \
         rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
-        for_each_online_cpu(i) { \
+        for_each_cpu(i, &name##_cpus) { \
                 arch_spinlock_t *lock; \
                 lock = &per_cpu(name##_lock, i); \
                 arch_spin_unlock(lock); \
         } \
-        preempt_enable(); \
+        spin_unlock(&name##_cpu_lock); \
  } \
  EXPORT_SYMBOL(name##_global_unlock_online); \
  \
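
For reference, a minimal usage sketch of the lock family this header generates, using a hypothetical lock named my_lock. DEFINE_LGLOCK() and br_lock_init() are visible above; the br_read_lock()/br_write_lock() wrappers are assumed from the header's own comment about using the local lock for the read side and the global lock for the write side, so treat those exact names (and their mapping onto the *_global_lock_online() pair) as an assumption rather than something shown in this hunk.

/* Hypothetical caller of an lglock ("br lock"). */
#include <linux/init.h>
#include <linux/lglock.h>

DEFINE_LGLOCK(my_lock);                 /* emits per-CPU locks, cpumask, notifier */

static int __init my_subsys_init(void)
{
        br_lock_init(my_lock);          /* expands to my_lock_lock_init() */
        return 0;
}

static void my_read_side(void)
{
        br_read_lock(my_lock);          /* assumed wrapper: this CPU's lock only */
        /* ... frequent, read-mostly work ... */
        br_read_unlock(my_lock);
}

static void my_write_side(void)
{
        br_write_lock(my_lock);         /* assumed wrapper: every tracked CPU's lock */
        /* ... rare global update ... */
        br_write_unlock(my_lock);
}

The write side is the path the patch changes: it now walks the generated my_lock_cpus mask under my_lock_cpu_lock instead of for_each_online_cpu() under preempt_disable().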