diff options
Diffstat (limited to 'fs/namespace.c')
| -rw-r--r-- | fs/namespace.c | 142 |
1 file changed, 96 insertions(+), 46 deletions(-)
diff --git a/fs/namespace.c b/fs/namespace.c index 82ef1405260e..1f4f9dac6e5a 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -632,14 +632,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) | |||
| 632 | */ | 632 | */ |
| 633 | struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) | 633 | struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) |
| 634 | { | 634 | { |
| 635 | struct mount *p, *res; | 635 | struct mount *p, *res = NULL; |
| 636 | res = p = __lookup_mnt(mnt, dentry); | 636 | p = __lookup_mnt(mnt, dentry); |
| 637 | if (!p) | 637 | if (!p) |
| 638 | goto out; | 638 | goto out; |
| 639 | if (!(p->mnt.mnt_flags & MNT_UMOUNT)) | ||
| 640 | res = p; | ||
| 639 | hlist_for_each_entry_continue(p, mnt_hash) { | 641 | hlist_for_each_entry_continue(p, mnt_hash) { |
| 640 | if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry) | 642 | if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry) |
| 641 | break; | 643 | break; |
| 642 | res = p; | 644 | if (!(p->mnt.mnt_flags & MNT_UMOUNT)) |
| 645 | res = p; | ||
| 643 | } | 646 | } |
| 644 | out: | 647 | out: |
| 645 | return res; | 648 | return res; |
| @@ -795,10 +798,8 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns) | |||
| 795 | /* | 798 | /* |
| 796 | * vfsmount lock must be held for write | 799 | * vfsmount lock must be held for write |
| 797 | */ | 800 | */ |
| 798 | static void detach_mnt(struct mount *mnt, struct path *old_path) | 801 | static void unhash_mnt(struct mount *mnt) |
| 799 | { | 802 | { |
| 800 | old_path->dentry = mnt->mnt_mountpoint; | ||
| 801 | old_path->mnt = &mnt->mnt_parent->mnt; | ||
| 802 | mnt->mnt_parent = mnt; | 803 | mnt->mnt_parent = mnt; |
| 803 | mnt->mnt_mountpoint = mnt->mnt.mnt_root; | 804 | mnt->mnt_mountpoint = mnt->mnt.mnt_root; |
| 804 | list_del_init(&mnt->mnt_child); | 805 | list_del_init(&mnt->mnt_child); |
| @@ -811,6 +812,26 @@ static void detach_mnt(struct mount *mnt, struct path *old_path) | |||
| 811 | /* | 812 | /* |
| 812 | * vfsmount lock must be held for write | 813 | * vfsmount lock must be held for write |
| 813 | */ | 814 | */ |
| 815 | static void detach_mnt(struct mount *mnt, struct path *old_path) | ||
| 816 | { | ||
| 817 | old_path->dentry = mnt->mnt_mountpoint; | ||
| 818 | old_path->mnt = &mnt->mnt_parent->mnt; | ||
| 819 | unhash_mnt(mnt); | ||
| 820 | } | ||
| 821 | |||
| 822 | /* | ||
| 823 | * vfsmount lock must be held for write | ||
| 824 | */ | ||
| 825 | static void umount_mnt(struct mount *mnt) | ||
| 826 | { | ||
| 827 | /* old mountpoint will be dropped when we can do that */ | ||
| 828 | mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint; | ||
| 829 | unhash_mnt(mnt); | ||
| 830 | } | ||
| 831 | |||
| 832 | /* | ||
| 833 | * vfsmount lock must be held for write | ||
| 834 | */ | ||
| 814 | void mnt_set_mountpoint(struct mount *mnt, | 835 | void mnt_set_mountpoint(struct mount *mnt, |
| 815 | struct mountpoint *mp, | 836 | struct mountpoint *mp, |
| 816 | struct mount *child_mnt) | 837 | struct mount *child_mnt) |
| @@ -1078,6 +1099,13 @@ static void mntput_no_expire(struct mount *mnt) | |||
| 1078 | rcu_read_unlock(); | 1099 | rcu_read_unlock(); |
| 1079 | 1100 | ||
| 1080 | list_del(&mnt->mnt_instance); | 1101 | list_del(&mnt->mnt_instance); |
| 1102 | |||
| 1103 | if (unlikely(!list_empty(&mnt->mnt_mounts))) { | ||
| 1104 | struct mount *p, *tmp; | ||
| 1105 | list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { | ||
| 1106 | umount_mnt(p); | ||
| 1107 | } | ||
| 1108 | } | ||
| 1081 | unlock_mount_hash(); | 1109 | unlock_mount_hash(); |
| 1082 | 1110 | ||
| 1083 | if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) { | 1111 | if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) { |
| @@ -1298,17 +1326,15 @@ static HLIST_HEAD(unmounted); /* protected by namespace_sem */ | |||
| 1298 | 1326 | ||
| 1299 | static void namespace_unlock(void) | 1327 | static void namespace_unlock(void) |
| 1300 | { | 1328 | { |
| 1301 | struct hlist_head head = unmounted; | 1329 | struct hlist_head head; |
| 1302 | 1330 | ||
| 1303 | if (likely(hlist_empty(&head))) { | 1331 | hlist_move_list(&unmounted, &head); |
| 1304 | up_write(&namespace_sem); | ||
| 1305 | return; | ||
| 1306 | } | ||
| 1307 | 1332 | ||
| 1308 | head.first->pprev = &head.first; | ||
| 1309 | INIT_HLIST_HEAD(&unmounted); | ||
| 1310 | up_write(&namespace_sem); | 1333 | up_write(&namespace_sem); |
| 1311 | 1334 | ||
| 1335 | if (likely(hlist_empty(&head))) | ||
| 1336 | return; | ||
| 1337 | |||
| 1312 | synchronize_rcu(); | 1338 | synchronize_rcu(); |
| 1313 | 1339 | ||
| 1314 | group_pin_kill(&head); | 1340 | group_pin_kill(&head); |
| @@ -1319,49 +1345,63 @@ static inline void namespace_lock(void) | |||
| 1319 | down_write(&namespace_sem); | 1345 | down_write(&namespace_sem); |
| 1320 | } | 1346 | } |
| 1321 | 1347 | ||
| 1348 | enum umount_tree_flags { | ||
| 1349 | UMOUNT_SYNC = 1, | ||
| 1350 | UMOUNT_PROPAGATE = 2, | ||
| 1351 | UMOUNT_CONNECTED = 4, | ||
| 1352 | }; | ||
| 1322 | /* | 1353 | /* |
| 1323 | * mount_lock must be held | 1354 | * mount_lock must be held |
| 1324 | * namespace_sem must be held for write | 1355 | * namespace_sem must be held for write |
| 1325 | * how = 0 => just this tree, don't propagate | ||
| 1326 | * how = 1 => propagate; we know that nobody else has reference to any victims | ||
| 1327 | * how = 2 => lazy umount | ||
| 1328 | */ | 1356 | */ |
| 1329 | void umount_tree(struct mount *mnt, int how) | 1357 | static void umount_tree(struct mount *mnt, enum umount_tree_flags how) |
| 1330 | { | 1358 | { |
| 1331 | HLIST_HEAD(tmp_list); | 1359 | LIST_HEAD(tmp_list); |
| 1332 | struct mount *p; | 1360 | struct mount *p; |
| 1333 | 1361 | ||
| 1362 | if (how & UMOUNT_PROPAGATE) | ||
| 1363 | propagate_mount_unlock(mnt); | ||
| 1364 | |||
| 1365 | /* Gather the mounts to umount */ | ||
| 1334 | for (p = mnt; p; p = next_mnt(p, mnt)) { | 1366 | for (p = mnt; p; p = next_mnt(p, mnt)) { |
| 1335 | hlist_del_init_rcu(&p->mnt_hash); | 1367 | p->mnt.mnt_flags |= MNT_UMOUNT; |
| 1336 | hlist_add_head(&p->mnt_hash, &tmp_list); | 1368 | list_move(&p->mnt_list, &tmp_list); |
| 1337 | } | 1369 | } |
| 1338 | 1370 | ||
| 1339 | hlist_for_each_entry(p, &tmp_list, mnt_hash) | 1371 | /* Hide the mounts from mnt_mounts */ |
| 1372 | list_for_each_entry(p, &tmp_list, mnt_list) { | ||
| 1340 | list_del_init(&p->mnt_child); | 1373 | list_del_init(&p->mnt_child); |
| 1374 | } | ||
| 1341 | 1375 | ||
| 1342 | if (how) | 1376 | /* Add propagated mounts to the tmp_list */ |
| 1377 | if (how & UMOUNT_PROPAGATE) | ||
| 1343 | propagate_umount(&tmp_list); | 1378 | propagate_umount(&tmp_list); |
| 1344 | 1379 | ||
| 1345 | while (!hlist_empty(&tmp_list)) { | 1380 | while (!list_empty(&tmp_list)) { |
| 1346 | p = hlist_entry(tmp_list.first, struct mount, mnt_hash); | 1381 | bool disconnect; |
| 1347 | hlist_del_init_rcu(&p->mnt_hash); | 1382 | p = list_first_entry(&tmp_list, struct mount, mnt_list); |
| 1348 | list_del_init(&p->mnt_expire); | 1383 | list_del_init(&p->mnt_expire); |
| 1349 | list_del_init(&p->mnt_list); | 1384 | list_del_init(&p->mnt_list); |
| 1350 | __touch_mnt_namespace(p->mnt_ns); | 1385 | __touch_mnt_namespace(p->mnt_ns); |
| 1351 | p->mnt_ns = NULL; | 1386 | p->mnt_ns = NULL; |
| 1352 | if (how < 2) | 1387 | if (how & UMOUNT_SYNC) |
| 1353 | p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; | 1388 | p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; |
| 1354 | 1389 | ||
| 1355 | pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted); | 1390 | disconnect = !(((how & UMOUNT_CONNECTED) && |
| 1391 | mnt_has_parent(p) && | ||
| 1392 | (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) || | ||
| 1393 | IS_MNT_LOCKED_AND_LAZY(p)); | ||
| 1394 | |||
| 1395 | pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, | ||
| 1396 | disconnect ? &unmounted : NULL); | ||
| 1356 | if (mnt_has_parent(p)) { | 1397 | if (mnt_has_parent(p)) { |
| 1357 | hlist_del_init(&p->mnt_mp_list); | ||
| 1358 | put_mountpoint(p->mnt_mp); | ||
| 1359 | mnt_add_count(p->mnt_parent, -1); | 1398 | mnt_add_count(p->mnt_parent, -1); |
| 1360 | /* old mountpoint will be dropped when we can do that */ | 1399 | if (!disconnect) { |
| 1361 | p->mnt_ex_mountpoint = p->mnt_mountpoint; | 1400 | /* Don't forget about p */ |
| 1362 | p->mnt_mountpoint = p->mnt.mnt_root; | 1401 | list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); |
| 1363 | p->mnt_parent = p; | 1402 | } else { |
| 1364 | p->mnt_mp = NULL; | 1403 | umount_mnt(p); |
| 1404 | } | ||
| 1365 | } | 1405 | } |
| 1366 | change_mnt_propagation(p, MS_PRIVATE); | 1406 | change_mnt_propagation(p, MS_PRIVATE); |
| 1367 | } | 1407 | } |
| @@ -1447,14 +1487,14 @@ static int do_umount(struct mount *mnt, int flags) | |||
| 1447 | 1487 | ||
| 1448 | if (flags & MNT_DETACH) { | 1488 | if (flags & MNT_DETACH) { |
| 1449 | if (!list_empty(&mnt->mnt_list)) | 1489 | if (!list_empty(&mnt->mnt_list)) |
| 1450 | umount_tree(mnt, 2); | 1490 | umount_tree(mnt, UMOUNT_PROPAGATE); |
| 1451 | retval = 0; | 1491 | retval = 0; |
| 1452 | } else { | 1492 | } else { |
| 1453 | shrink_submounts(mnt); | 1493 | shrink_submounts(mnt); |
| 1454 | retval = -EBUSY; | 1494 | retval = -EBUSY; |
| 1455 | if (!propagate_mount_busy(mnt, 2)) { | 1495 | if (!propagate_mount_busy(mnt, 2)) { |
| 1456 | if (!list_empty(&mnt->mnt_list)) | 1496 | if (!list_empty(&mnt->mnt_list)) |
| 1457 | umount_tree(mnt, 1); | 1497 | umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); |
| 1458 | retval = 0; | 1498 | retval = 0; |
| 1459 | } | 1499 | } |
| 1460 | } | 1500 | } |
| @@ -1480,13 +1520,20 @@ void __detach_mounts(struct dentry *dentry) | |||
| 1480 | 1520 | ||
| 1481 | namespace_lock(); | 1521 | namespace_lock(); |
| 1482 | mp = lookup_mountpoint(dentry); | 1522 | mp = lookup_mountpoint(dentry); |
| 1483 | if (!mp) | 1523 | if (IS_ERR_OR_NULL(mp)) |
| 1484 | goto out_unlock; | 1524 | goto out_unlock; |
| 1485 | 1525 | ||
| 1486 | lock_mount_hash(); | 1526 | lock_mount_hash(); |
| 1487 | while (!hlist_empty(&mp->m_list)) { | 1527 | while (!hlist_empty(&mp->m_list)) { |
| 1488 | mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); | 1528 | mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); |
| 1489 | umount_tree(mnt, 2); | 1529 | if (mnt->mnt.mnt_flags & MNT_UMOUNT) { |
| 1530 | struct mount *p, *tmp; | ||
| 1531 | list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { | ||
| 1532 | hlist_add_head(&p->mnt_umount.s_list, &unmounted); | ||
| 1533 | umount_mnt(p); | ||
| 1534 | } | ||
| 1535 | } | ||
| 1536 | else umount_tree(mnt, UMOUNT_CONNECTED); | ||
| 1490 | } | 1537 | } |
| 1491 | unlock_mount_hash(); | 1538 | unlock_mount_hash(); |
| 1492 | put_mountpoint(mp); | 1539 | put_mountpoint(mp); |
| @@ -1648,7 +1695,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, | |||
| 1648 | out: | 1695 | out: |
| 1649 | if (res) { | 1696 | if (res) { |
| 1650 | lock_mount_hash(); | 1697 | lock_mount_hash(); |
| 1651 | umount_tree(res, 0); | 1698 | umount_tree(res, UMOUNT_SYNC); |
| 1652 | unlock_mount_hash(); | 1699 | unlock_mount_hash(); |
| 1653 | } | 1700 | } |
| 1654 | return q; | 1701 | return q; |
| @@ -1660,8 +1707,11 @@ struct vfsmount *collect_mounts(struct path *path) | |||
| 1660 | { | 1707 | { |
| 1661 | struct mount *tree; | 1708 | struct mount *tree; |
| 1662 | namespace_lock(); | 1709 | namespace_lock(); |
| 1663 | tree = copy_tree(real_mount(path->mnt), path->dentry, | 1710 | if (!check_mnt(real_mount(path->mnt))) |
| 1664 | CL_COPY_ALL | CL_PRIVATE); | 1711 | tree = ERR_PTR(-EINVAL); |
| 1712 | else | ||
| 1713 | tree = copy_tree(real_mount(path->mnt), path->dentry, | ||
| 1714 | CL_COPY_ALL | CL_PRIVATE); | ||
| 1665 | namespace_unlock(); | 1715 | namespace_unlock(); |
| 1666 | if (IS_ERR(tree)) | 1716 | if (IS_ERR(tree)) |
| 1667 | return ERR_CAST(tree); | 1717 | return ERR_CAST(tree); |
| @@ -1672,7 +1722,7 @@ void drop_collected_mounts(struct vfsmount *mnt) | |||
| 1672 | { | 1722 | { |
| 1673 | namespace_lock(); | 1723 | namespace_lock(); |
| 1674 | lock_mount_hash(); | 1724 | lock_mount_hash(); |
| 1675 | umount_tree(real_mount(mnt), 0); | 1725 | umount_tree(real_mount(mnt), UMOUNT_SYNC); |
| 1676 | unlock_mount_hash(); | 1726 | unlock_mount_hash(); |
| 1677 | namespace_unlock(); | 1727 | namespace_unlock(); |
| 1678 | } | 1728 | } |
| @@ -1855,7 +1905,7 @@ static int attach_recursive_mnt(struct mount *source_mnt, | |||
| 1855 | out_cleanup_ids: | 1905 | out_cleanup_ids: |
| 1856 | while (!hlist_empty(&tree_list)) { | 1906 | while (!hlist_empty(&tree_list)) { |
| 1857 | child = hlist_entry(tree_list.first, struct mount, mnt_hash); | 1907 | child = hlist_entry(tree_list.first, struct mount, mnt_hash); |
| 1858 | umount_tree(child, 0); | 1908 | umount_tree(child, UMOUNT_SYNC); |
| 1859 | } | 1909 | } |
| 1860 | unlock_mount_hash(); | 1910 | unlock_mount_hash(); |
| 1861 | cleanup_group_ids(source_mnt, NULL); | 1911 | cleanup_group_ids(source_mnt, NULL); |
| @@ -2035,7 +2085,7 @@ static int do_loopback(struct path *path, const char *old_name, | |||
| 2035 | err = graft_tree(mnt, parent, mp); | 2085 | err = graft_tree(mnt, parent, mp); |
| 2036 | if (err) { | 2086 | if (err) { |
| 2037 | lock_mount_hash(); | 2087 | lock_mount_hash(); |
| 2038 | umount_tree(mnt, 0); | 2088 | umount_tree(mnt, UMOUNT_SYNC); |
| 2039 | unlock_mount_hash(); | 2089 | unlock_mount_hash(); |
| 2040 | } | 2090 | } |
| 2041 | out2: | 2091 | out2: |
| @@ -2406,7 +2456,7 @@ void mark_mounts_for_expiry(struct list_head *mounts) | |||
| 2406 | while (!list_empty(&graveyard)) { | 2456 | while (!list_empty(&graveyard)) { |
| 2407 | mnt = list_first_entry(&graveyard, struct mount, mnt_expire); | 2457 | mnt = list_first_entry(&graveyard, struct mount, mnt_expire); |
| 2408 | touch_mnt_namespace(mnt->mnt_ns); | 2458 | touch_mnt_namespace(mnt->mnt_ns); |
| 2409 | umount_tree(mnt, 1); | 2459 | umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); |
| 2410 | } | 2460 | } |
| 2411 | unlock_mount_hash(); | 2461 | unlock_mount_hash(); |
| 2412 | namespace_unlock(); | 2462 | namespace_unlock(); |
| @@ -2477,7 +2527,7 @@ static void shrink_submounts(struct mount *mnt) | |||
| 2477 | m = list_first_entry(&graveyard, struct mount, | 2527 | m = list_first_entry(&graveyard, struct mount, |
| 2478 | mnt_expire); | 2528 | mnt_expire); |
| 2479 | touch_mnt_namespace(m->mnt_ns); | 2529 | touch_mnt_namespace(m->mnt_ns); |
| 2480 | umount_tree(m, 1); | 2530 | umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC); |
| 2481 | } | 2531 | } |
| 2482 | } | 2532 | } |
| 2483 | } | 2533 | } |
