summaryrefslogtreecommitdiffstats
path: root/fs/namespace.c
diff options
context:
space:
mode:
authorEric W. Biederman <ebiederm@xmission.com>2014-12-24 08:20:01 -0500
committerEric W. Biederman <ebiederm@xmission.com>2015-04-02 21:34:17 -0400
commite819f152104c9f7c9fe50e1aecce6f5d4bf06d65 (patch)
treee2f08411e078cce21fe838aea1ed7d9ca3be7812 /fs/namespace.c
parenta3b3c5627c8301ac850962b04f645dfab81e6a60 (diff)
mnt: Improve the umount_tree flags
- Remove the unneeded declaration from pnode.h
- Mark umount_tree static as it has no callers outside of namespace.c
- Define an enumeration of umount_tree's flags.
- Pass umount_tree's flags in by name

This removes the magic numbers 0, 1 and 2 making the code a little clearer and makes it possible for there to be lazy unmounts that don't propagate. Which is what __detach_mounts actually wants for example.

Cc: stable@vger.kernel.org
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Diffstat (limited to 'fs/namespace.c')
-rw-r--r--fs/namespace.c31
1 file changed, 16 insertions, 15 deletions
diff --git a/fs/namespace.c b/fs/namespace.c
index e1ee57206eef..e06e36777b90 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1317,14 +1317,15 @@ static inline void namespace_lock(void)
1317 down_write(&namespace_sem); 1317 down_write(&namespace_sem);
1318} 1318}
1319 1319
1320enum umount_tree_flags {
1321 UMOUNT_SYNC = 1,
1322 UMOUNT_PROPAGATE = 2,
1323};
1320/* 1324/*
1321 * mount_lock must be held 1325 * mount_lock must be held
1322 * namespace_sem must be held for write 1326 * namespace_sem must be held for write
1323 * how = 0 => just this tree, don't propagate
1324 * how = 1 => propagate; we know that nobody else has reference to any victims
1325 * how = 2 => lazy umount
1326 */ 1327 */
1327void umount_tree(struct mount *mnt, int how) 1328static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1328{ 1329{
1329 HLIST_HEAD(tmp_list); 1330 HLIST_HEAD(tmp_list);
1330 struct mount *p; 1331 struct mount *p;
@@ -1337,7 +1338,7 @@ void umount_tree(struct mount *mnt, int how)
1337 hlist_for_each_entry(p, &tmp_list, mnt_hash) 1338 hlist_for_each_entry(p, &tmp_list, mnt_hash)
1338 list_del_init(&p->mnt_child); 1339 list_del_init(&p->mnt_child);
1339 1340
1340 if (how) 1341 if (how & UMOUNT_PROPAGATE)
1341 propagate_umount(&tmp_list); 1342 propagate_umount(&tmp_list);
1342 1343
1343 while (!hlist_empty(&tmp_list)) { 1344 while (!hlist_empty(&tmp_list)) {
@@ -1347,7 +1348,7 @@ void umount_tree(struct mount *mnt, int how)
1347 list_del_init(&p->mnt_list); 1348 list_del_init(&p->mnt_list);
1348 __touch_mnt_namespace(p->mnt_ns); 1349 __touch_mnt_namespace(p->mnt_ns);
1349 p->mnt_ns = NULL; 1350 p->mnt_ns = NULL;
1350 if (how < 2) 1351 if (how & UMOUNT_SYNC)
1351 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; 1352 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1352 1353
1353 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted); 1354 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted);
@@ -1445,14 +1446,14 @@ static int do_umount(struct mount *mnt, int flags)
1445 1446
1446 if (flags & MNT_DETACH) { 1447 if (flags & MNT_DETACH) {
1447 if (!list_empty(&mnt->mnt_list)) 1448 if (!list_empty(&mnt->mnt_list))
1448 umount_tree(mnt, 2); 1449 umount_tree(mnt, UMOUNT_PROPAGATE);
1449 retval = 0; 1450 retval = 0;
1450 } else { 1451 } else {
1451 shrink_submounts(mnt); 1452 shrink_submounts(mnt);
1452 retval = -EBUSY; 1453 retval = -EBUSY;
1453 if (!propagate_mount_busy(mnt, 2)) { 1454 if (!propagate_mount_busy(mnt, 2)) {
1454 if (!list_empty(&mnt->mnt_list)) 1455 if (!list_empty(&mnt->mnt_list))
1455 umount_tree(mnt, 1); 1456 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
1456 retval = 0; 1457 retval = 0;
1457 } 1458 }
1458 } 1459 }
@@ -1484,7 +1485,7 @@ void __detach_mounts(struct dentry *dentry)
1484 lock_mount_hash(); 1485 lock_mount_hash();
1485 while (!hlist_empty(&mp->m_list)) { 1486 while (!hlist_empty(&mp->m_list)) {
1486 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); 1487 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1487 umount_tree(mnt, 2); 1488 umount_tree(mnt, UMOUNT_PROPAGATE);
1488 } 1489 }
1489 unlock_mount_hash(); 1490 unlock_mount_hash();
1490 put_mountpoint(mp); 1491 put_mountpoint(mp);
@@ -1646,7 +1647,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1646out: 1647out:
1647 if (res) { 1648 if (res) {
1648 lock_mount_hash(); 1649 lock_mount_hash();
1649 umount_tree(res, 0); 1650 umount_tree(res, UMOUNT_SYNC);
1650 unlock_mount_hash(); 1651 unlock_mount_hash();
1651 } 1652 }
1652 return q; 1653 return q;
@@ -1670,7 +1671,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
1670{ 1671{
1671 namespace_lock(); 1672 namespace_lock();
1672 lock_mount_hash(); 1673 lock_mount_hash();
1673 umount_tree(real_mount(mnt), 0); 1674 umount_tree(real_mount(mnt), UMOUNT_SYNC);
1674 unlock_mount_hash(); 1675 unlock_mount_hash();
1675 namespace_unlock(); 1676 namespace_unlock();
1676} 1677}
@@ -1853,7 +1854,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
1853 out_cleanup_ids: 1854 out_cleanup_ids:
1854 while (!hlist_empty(&tree_list)) { 1855 while (!hlist_empty(&tree_list)) {
1855 child = hlist_entry(tree_list.first, struct mount, mnt_hash); 1856 child = hlist_entry(tree_list.first, struct mount, mnt_hash);
1856 umount_tree(child, 0); 1857 umount_tree(child, UMOUNT_SYNC);
1857 } 1858 }
1858 unlock_mount_hash(); 1859 unlock_mount_hash();
1859 cleanup_group_ids(source_mnt, NULL); 1860 cleanup_group_ids(source_mnt, NULL);
@@ -2033,7 +2034,7 @@ static int do_loopback(struct path *path, const char *old_name,
2033 err = graft_tree(mnt, parent, mp); 2034 err = graft_tree(mnt, parent, mp);
2034 if (err) { 2035 if (err) {
2035 lock_mount_hash(); 2036 lock_mount_hash();
2036 umount_tree(mnt, 0); 2037 umount_tree(mnt, UMOUNT_SYNC);
2037 unlock_mount_hash(); 2038 unlock_mount_hash();
2038 } 2039 }
2039out2: 2040out2:
@@ -2404,7 +2405,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
2404 while (!list_empty(&graveyard)) { 2405 while (!list_empty(&graveyard)) {
2405 mnt = list_first_entry(&graveyard, struct mount, mnt_expire); 2406 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
2406 touch_mnt_namespace(mnt->mnt_ns); 2407 touch_mnt_namespace(mnt->mnt_ns);
2407 umount_tree(mnt, 1); 2408 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
2408 } 2409 }
2409 unlock_mount_hash(); 2410 unlock_mount_hash();
2410 namespace_unlock(); 2411 namespace_unlock();
@@ -2475,7 +2476,7 @@ static void shrink_submounts(struct mount *mnt)
2475 m = list_first_entry(&graveyard, struct mount, 2476 m = list_first_entry(&graveyard, struct mount,
2476 mnt_expire); 2477 mnt_expire);
2477 touch_mnt_namespace(m->mnt_ns); 2478 touch_mnt_namespace(m->mnt_ns);
2478 umount_tree(m, 1); 2479 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
2479 } 2480 }
2480 } 2481 }
2481} 2482}