diff options
author | Tejun Heo <tj@kernel.org> | 2014-02-03 14:02:59 -0500 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2014-02-07 18:42:40 -0500 |
commit | 182fd64b66342219d6fcf2b84d337529d120d95c (patch) | |
tree | f8e0377fa5cdf90ea11245fd4f028c07ada4a5cd /fs | |
parent | 988cd7afb3f37598891ca70b4c6eb914c338c46a (diff) |
kernfs: remove KERNFS_ACTIVE_REF and add kernfs_lockdep()
There currently are two mechanisms gating active ref lockdep
annotations - KERNFS_LOCKDEP flag and KERNFS_ACTIVE_REF type mask.
The former disables lockdep annotations in kernfs_get/put_active()
while the latter disables all of kernfs_deactivate().
While KERNFS_ACTIVE_REF also behaves as an optimization to skip the
deactivation step for non-file nodes, the benefit is marginal and it
needlessly diverges code paths. Let's drop KERNFS_ACTIVE_REF.
While at it, add a test helper kernfs_lockdep() to test KERNFS_LOCKDEP
flag so that it's more convenient and the related code can be compiled
out when not enabled.
v2: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag"). As the earlier patch already added
KERNFS_LOCKDEP tests to kernfs_deactivate(), those additions are
dropped from this patch and the existing ones are simply converted
to kernfs_lockdep().
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/kernfs/dir.c | 20 |
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 948551d222b4..5cf137b63db9 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c | |||
@@ -22,6 +22,15 @@ DEFINE_MUTEX(kernfs_mutex); | |||
22 | 22 | ||
23 | #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb) | 23 | #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb) |
24 | 24 | ||
25 | static bool kernfs_lockdep(struct kernfs_node *kn) | ||
26 | { | ||
27 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
28 | return kn->flags & KERNFS_LOCKDEP; | ||
29 | #else | ||
30 | return false; | ||
31 | #endif | ||
32 | } | ||
33 | |||
25 | /** | 34 | /** |
26 | * kernfs_name_hash | 35 | * kernfs_name_hash |
27 | * @name: Null terminated string to hash | 36 | * @name: Null terminated string to hash |
@@ -144,7 +153,7 @@ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn) | |||
144 | if (!atomic_inc_unless_negative(&kn->active)) | 153 | if (!atomic_inc_unless_negative(&kn->active)) |
145 | return NULL; | 154 | return NULL; |
146 | 155 | ||
147 | if (kn->flags & KERNFS_LOCKDEP) | 156 | if (kernfs_lockdep(kn)) |
148 | rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_); | 157 | rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_); |
149 | return kn; | 158 | return kn; |
150 | } | 159 | } |
@@ -164,7 +173,7 @@ void kernfs_put_active(struct kernfs_node *kn) | |||
164 | if (unlikely(!kn)) | 173 | if (unlikely(!kn)) |
165 | return; | 174 | return; |
166 | 175 | ||
167 | if (kn->flags & KERNFS_LOCKDEP) | 176 | if (kernfs_lockdep(kn)) |
168 | rwsem_release(&kn->dep_map, 1, _RET_IP_); | 177 | rwsem_release(&kn->dep_map, 1, _RET_IP_); |
169 | v = atomic_dec_return(&kn->active); | 178 | v = atomic_dec_return(&kn->active); |
170 | if (likely(v != KN_DEACTIVATED_BIAS)) | 179 | if (likely(v != KN_DEACTIVATED_BIAS)) |
@@ -190,16 +199,13 @@ static void kernfs_deactivate(struct kernfs_node *kn) | |||
190 | lockdep_assert_held(&kernfs_mutex); | 199 | lockdep_assert_held(&kernfs_mutex); |
191 | BUG_ON(!(kn->flags & KERNFS_REMOVED)); | 200 | BUG_ON(!(kn->flags & KERNFS_REMOVED)); |
192 | 201 | ||
193 | if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF)) | ||
194 | return; | ||
195 | |||
196 | /* only the first invocation on @kn should deactivate it */ | 202 | /* only the first invocation on @kn should deactivate it */ |
197 | if (atomic_read(&kn->active) >= 0) | 203 | if (atomic_read(&kn->active) >= 0) |
198 | atomic_add(KN_DEACTIVATED_BIAS, &kn->active); | 204 | atomic_add(KN_DEACTIVATED_BIAS, &kn->active); |
199 | 205 | ||
200 | mutex_unlock(&kernfs_mutex); | 206 | mutex_unlock(&kernfs_mutex); |
201 | 207 | ||
202 | if (kn->flags & KERNFS_LOCKDEP) { | 208 | if (kernfs_lockdep(kn)) { |
203 | rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_); | 209 | rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_); |
204 | if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS) | 210 | if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS) |
205 | lock_contended(&kn->dep_map, _RET_IP_); | 211 | lock_contended(&kn->dep_map, _RET_IP_); |
@@ -209,7 +215,7 @@ static void kernfs_deactivate(struct kernfs_node *kn) | |||
209 | wait_event(root->deactivate_waitq, | 215 | wait_event(root->deactivate_waitq, |
210 | atomic_read(&kn->active) == KN_DEACTIVATED_BIAS); | 216 | atomic_read(&kn->active) == KN_DEACTIVATED_BIAS); |
211 | 217 | ||
212 | if (kn->flags & KERNFS_LOCKDEP) { | 218 | if (kernfs_lockdep(kn)) { |
213 | lock_acquired(&kn->dep_map, _RET_IP_); | 219 | lock_acquired(&kn->dep_map, _RET_IP_); |
214 | rwsem_release(&kn->dep_map, 1, _RET_IP_); | 220 | rwsem_release(&kn->dep_map, 1, _RET_IP_); |
215 | } | 221 | } |