author	Tejun Heo <tj@kernel.org>	2014-01-10 08:57:20 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-01-10 16:44:25 -0500
commit	a69d001cfc712b96ec9d7ba44d6285702a38dabf
tree	295a4f2c57a883d45596078a28278cb054bf9a02 /fs/kernfs/dir.c
parent	ea1c472dfeada211a0100daa7976e8e8e779b858
kernfs: remove KERNFS_ACTIVE_REF and add kernfs_lockdep()
There are currently two mechanisms gating active ref lockdep annotations: the KERNFS_LOCKDEP flag and the KERNFS_ACTIVE_REF type mask. The former disables lockdep annotations in kernfs_get/put_active(), while the latter disables all of kernfs_deactivate().

While KERNFS_ACTIVE_REF also acts as an optimization that skips the deactivation step for non-file nodes, the benefit is marginal and it needlessly diverges the code paths. Drop KERNFS_ACTIVE_REF and use KERNFS_LOCKDEP in kernfs_deactivate() too.

While at it, add a helper, kernfs_lockdep(), to test the KERNFS_LOCKDEP flag, so that the test is more convenient and the related code can be compiled out when lockdep is not enabled.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
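The active ref being annotated is a biased counter: getters succeed only while the count is non-negative, and removal adds a large negative bias so that new getters fail while the remover waits for in-flight references to drain. Below is a minimal user-space sketch of that scheme, using C11 atomics as stand-ins for the kernel primitives; get_active(), put_active(), deactivate(), and DEACTIVATED_BIAS are illustrative names, not the kernfs symbols.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

#define DEACTIVATED_BIAS INT_MIN	/* plays the role of KN_DEACTIVATED_BIAS */

static atomic_int active;

/* Getter: take a ref only while the counter is non-negative,
 * mirroring atomic_inc_unless_negative() in kernfs_get_active(). */
static bool get_active(void)
{
	int v = atomic_load(&active);

	while (v >= 0)
		if (atomic_compare_exchange_weak(&active, &v, v + 1))
			return true;
	return false;
}

/* Putter: drop the ref; in kernfs the final put wakes deactivate_waitq. */
static void put_active(void)
{
	atomic_fetch_sub(&active, 1);
}

/* Remover: bias the counter so new gets fail, then wait until all
 * in-flight refs drain. kernfs sleeps on a waitqueue; this spins. */
static void deactivate(void)
{
	atomic_fetch_add(&active, DEACTIVATED_BIAS);
	while (atomic_load(&active) != DEACTIVATED_BIAS)
		;	/* busy-wait stand-in for wait_event() */
}

Lockdep models this pair as a read-write semaphore: each successful get is a read acquisition and deactivation is a write acquisition, which is what the rwsem_acquire*() annotations in the diff below express.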
Diffstat (limited to 'fs/kernfs/dir.c')
-rw-r--r--	fs/kernfs/dir.c	31
1 file changed, 20 insertions(+), 11 deletions(-)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index ed62de6cdf8f..1c9130a33048 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -22,6 +22,15 @@ DEFINE_MUTEX(kernfs_mutex);
 
 #define rb_to_kn(X)	rb_entry((X), struct kernfs_node, rb)
 
+static bool kernfs_lockdep(struct kernfs_node *kn)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	return kn->flags & KERNFS_LOCKDEP;
+#else
+	return false;
+#endif
+}
+
 /**
  *	kernfs_name_hash
  *	@name: Null terminated string to hash
@@ -138,7 +147,7 @@ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
 	if (!atomic_inc_unless_negative(&kn->active))
 		return NULL;
 
-	if (kn->flags & KERNFS_LOCKDEP)
+	if (kernfs_lockdep(kn))
 		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
 	return kn;
 }
@@ -158,7 +167,7 @@ void kernfs_put_active(struct kernfs_node *kn)
 	if (unlikely(!kn))
 		return;
 
-	if (kn->flags & KERNFS_LOCKDEP)
+	if (kernfs_lockdep(kn))
 		rwsem_release(&kn->dep_map, 1, _RET_IP_);
 	v = atomic_dec_return(&kn->active);
 	if (likely(v != KN_DEACTIVATED_BIAS))
@@ -179,21 +188,21 @@ static void kernfs_deactivate(struct kernfs_node *kn)
 
 	BUG_ON(!(kn->flags & KERNFS_REMOVED));
 
-	if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF))
-		return;
-
-	rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
-
 	atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
 
-	if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
-		lock_contended(&kn->dep_map, _RET_IP_);
+	if (kernfs_lockdep(kn)) {
+		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
+		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
+			lock_contended(&kn->dep_map, _RET_IP_);
+	}
 
 	wait_event(root->deactivate_waitq,
 		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
 
-	lock_acquired(&kn->dep_map, _RET_IP_);
-	rwsem_release(&kn->dep_map, 1, _RET_IP_);
+	if (kernfs_lockdep(kn)) {
+		lock_acquired(&kn->dep_map, _RET_IP_);
+		rwsem_release(&kn->dep_map, 1, _RET_IP_);
+	}
 }
 
 /**
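For reference, callers bracket an operation with the get/put pair so that kernfs_deactivate() cannot complete while the node is in use. A hedged sketch modeled on kernfs's file ops, where example_read() and do_read() are hypothetical and not part of this patch:

/* Hypothetical caller: pin the node's active ref across an operation. */
static ssize_t example_read(struct kernfs_node *kn, char *buf, size_t len)
{
	ssize_t ret;

	if (!kernfs_get_active(kn))
		return -ENODEV;		/* node already deactivated */

	ret = do_read(kn, buf, len);	/* hypothetical payload */

	kernfs_put_active(kn);		/* may wake a waiting kernfs_deactivate() */
	return ret;
}

With kernfs_lockdep() gating the annotations, a path that removes a node while still holding its own active ref is reported by lockdep as a read-vs-write deadlock on kn->dep_map rather than manifesting as a silent hang on deactivate_waitq.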