aboutsummaryrefslogtreecommitdiffstats
path: root/fs/kernfs/dir.c
diff options
context:
space:
mode:
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>2014-01-13 17:39:52 -0500
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2014-01-13 17:39:52 -0500
commit0890147fe09ff7e8275a162b1ab76ab5e3158c6d (patch)
tree797d8beb9a38fb4580d2ad628571615528f18028 /fs/kernfs/dir.c
parent798c75a0d44cdbd6e3d82a6a676e6de38525b3bb (diff)
Revert "kernfs: remove KERNFS_ACTIVE_REF and add kernfs_lockdep()"
This reverts commit a69d001cfc712b96ec9d7ba44d6285702a38dabf. Tejun writes: I'm sorry but can you please revert the whole series? get_active() waiting while a node is deactivated has potential to lead to deadlock and that deactivate/reactivate interface is something fundamentally flawed and that cgroup will have to work with the remove_self() like everybody else. IOW, I think the first posting was correct. Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'fs/kernfs/dir.c')
-rw-r--r--fs/kernfs/dir.c31
1 file changed, 11 insertions, 20 deletions
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 1c9130a33048..ed62de6cdf8f 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -22,15 +22,6 @@ DEFINE_MUTEX(kernfs_mutex);
 
 #define rb_to_kn(X)	rb_entry((X), struct kernfs_node, rb)
 
-static bool kernfs_lockdep(struct kernfs_node *kn)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	return kn->flags & KERNFS_LOCKDEP;
-#else
-	return false;
-#endif
-}
-
 /**
  *	kernfs_name_hash
  *	@name: Null terminated string to hash
@@ -147,7 +138,7 @@ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
 	if (!atomic_inc_unless_negative(&kn->active))
 		return NULL;
 
-	if (kernfs_lockdep(kn))
+	if (kn->flags & KERNFS_LOCKDEP)
 		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
 	return kn;
 }
@@ -167,7 +158,7 @@ void kernfs_put_active(struct kernfs_node *kn)
 	if (unlikely(!kn))
 		return;
 
-	if (kernfs_lockdep(kn))
+	if (kn->flags & KERNFS_LOCKDEP)
 		rwsem_release(&kn->dep_map, 1, _RET_IP_);
 	v = atomic_dec_return(&kn->active);
 	if (likely(v != KN_DEACTIVATED_BIAS))
@@ -188,21 +179,21 @@ static void kernfs_deactivate(struct kernfs_node *kn)
 
 	BUG_ON(!(kn->flags & KERNFS_REMOVED));
 
+	if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF))
+		return;
+
+	rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
+
 	atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
 
-	if (kernfs_lockdep(kn)) {
-		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
-		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
-			lock_contended(&kn->dep_map, _RET_IP_);
-	}
+	if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
+		lock_contended(&kn->dep_map, _RET_IP_);
 
 	wait_event(root->deactivate_waitq,
 		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
 
-	if (kernfs_lockdep(kn)) {
-		lock_acquired(&kn->dep_map, _RET_IP_);
-		rwsem_release(&kn->dep_map, 1, _RET_IP_);
-	}
+	lock_acquired(&kn->dep_map, _RET_IP_);
+	rwsem_release(&kn->dep_map, 1, _RET_IP_);
 }
 
 /**