aboutsummaryrefslogtreecommitdiffstats
path: root/fs/pnode.c
diff options
context:
space:
mode:
authorAl Viro <viro@zeniv.linux.org.uk>2014-03-20 21:10:51 -0400
committerAl Viro <viro@zeniv.linux.org.uk>2014-03-30 19:18:51 -0400
commit38129a13e6e71f666e0468e99fdd932a687b4d7e (patch)
tree438e817fdf7d224f9fda1186eb24b1bbc37a4b5c /fs/pnode.c
parent0b1b901b5a98bb36943d10820efc796f7cd45ff3 (diff)
switch mnt_hash to hlist
Fixes an RCU bug: walking through an hlist is safe in the face of element moves, since it is self-terminating. Cyclic lists are not — if we end up jumping to another hash chain, we'll loop infinitely without ever hitting the original list head. [fix for dumb braino folded] Spotted by: Max Kellermann <mk@cm4all.com> Cc: stable@vger.kernel.org Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/pnode.c')
-rw-r--r--fs/pnode.c26
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/fs/pnode.c b/fs/pnode.c
index c7221bb19801..88396df725b4 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -220,14 +220,14 @@ static struct mount *get_source(struct mount *dest,
220 * @tree_list : list of heads of trees to be attached. 220 * @tree_list : list of heads of trees to be attached.
221 */ 221 */
222int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, 222int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
223 struct mount *source_mnt, struct list_head *tree_list) 223 struct mount *source_mnt, struct hlist_head *tree_list)
224{ 224{
225 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; 225 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
226 struct mount *m, *child; 226 struct mount *m, *child;
227 int ret = 0; 227 int ret = 0;
228 struct mount *prev_dest_mnt = dest_mnt; 228 struct mount *prev_dest_mnt = dest_mnt;
229 struct mount *prev_src_mnt = source_mnt; 229 struct mount *prev_src_mnt = source_mnt;
230 LIST_HEAD(tmp_list); 230 HLIST_HEAD(tmp_list);
231 231
232 for (m = propagation_next(dest_mnt, dest_mnt); m; 232 for (m = propagation_next(dest_mnt, dest_mnt); m;
233 m = propagation_next(m, dest_mnt)) { 233 m = propagation_next(m, dest_mnt)) {
@@ -246,27 +246,29 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
246 child = copy_tree(source, source->mnt.mnt_root, type); 246 child = copy_tree(source, source->mnt.mnt_root, type);
247 if (IS_ERR(child)) { 247 if (IS_ERR(child)) {
248 ret = PTR_ERR(child); 248 ret = PTR_ERR(child);
249 list_splice(tree_list, tmp_list.prev); 249 tmp_list = *tree_list;
250 tmp_list.first->pprev = &tmp_list.first;
251 INIT_HLIST_HEAD(tree_list);
250 goto out; 252 goto out;
251 } 253 }
252 254
253 if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) { 255 if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
254 mnt_set_mountpoint(m, dest_mp, child); 256 mnt_set_mountpoint(m, dest_mp, child);
255 list_add_tail(&child->mnt_hash, tree_list); 257 hlist_add_head(&child->mnt_hash, tree_list);
256 } else { 258 } else {
257 /* 259 /*
258 * This can happen if the parent mount was bind mounted 260 * This can happen if the parent mount was bind mounted
259 * on some subdirectory of a shared/slave mount. 261 * on some subdirectory of a shared/slave mount.
260 */ 262 */
261 list_add_tail(&child->mnt_hash, &tmp_list); 263 hlist_add_head(&child->mnt_hash, &tmp_list);
262 } 264 }
263 prev_dest_mnt = m; 265 prev_dest_mnt = m;
264 prev_src_mnt = child; 266 prev_src_mnt = child;
265 } 267 }
266out: 268out:
267 lock_mount_hash(); 269 lock_mount_hash();
268 while (!list_empty(&tmp_list)) { 270 while (!hlist_empty(&tmp_list)) {
269 child = list_first_entry(&tmp_list, struct mount, mnt_hash); 271 child = hlist_entry(tmp_list.first, struct mount, mnt_hash);
270 umount_tree(child, 0); 272 umount_tree(child, 0);
271 } 273 }
272 unlock_mount_hash(); 274 unlock_mount_hash();
@@ -338,8 +340,10 @@ static void __propagate_umount(struct mount *mnt)
338 * umount the child only if the child has no 340 * umount the child only if the child has no
339 * other children 341 * other children
340 */ 342 */
341 if (child && list_empty(&child->mnt_mounts)) 343 if (child && list_empty(&child->mnt_mounts)) {
342 list_move_tail(&child->mnt_hash, &mnt->mnt_hash); 344 hlist_del_init_rcu(&child->mnt_hash);
345 hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
346 }
343 } 347 }
344} 348}
345 349
@@ -350,11 +354,11 @@ static void __propagate_umount(struct mount *mnt)
350 * 354 *
351 * vfsmount lock must be held for write 355 * vfsmount lock must be held for write
352 */ 356 */
353int propagate_umount(struct list_head *list) 357int propagate_umount(struct hlist_head *list)
354{ 358{
355 struct mount *mnt; 359 struct mount *mnt;
356 360
357 list_for_each_entry(mnt, list, mnt_hash) 361 hlist_for_each_entry(mnt, list, mnt_hash)
358 __propagate_umount(mnt); 362 __propagate_umount(mnt);
359 return 0; 363 return 0;
360} 364}