Diffstat (limited to 'fs/namespace.c')
-rw-r--r--   fs/namespace.c   142
1 file changed, 72 insertions(+), 70 deletions(-)
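The pattern throughout the hunks below is mechanical: every br_read_lock/br_write_lock/br_lock_init call on vfsmount_lock now passes the lock by address instead of by name, which matches the conversion of the lglock/brlock primitives from name-pasting macros to ordinary functions that take a pointer to the lock. A minimal sketch of the calling convention (the DEFINE_BRLOCK declaration is shown only as an assumed illustration, not part of this diff):

	/* assumed declaration, for illustration; fs/namespace.c defines the lock once */
	DEFINE_BRLOCK(vfsmount_lock);

	/* old style: the brlock macros pasted in the bare lock name */
	br_write_lock(vfsmount_lock);
	br_write_unlock(vfsmount_lock);

	/* new style: the lock is a real object, passed by address */
	br_write_lock(&vfsmount_lock);
	br_write_unlock(&vfsmount_lock);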
diff --git a/fs/namespace.c b/fs/namespace.c
index e6081996c9a2..1e4a5fe3d7b7 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -397,7 +397,7 @@ static int mnt_make_readonly(struct mount *mnt)
 {
 	int ret = 0;
 
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
 	/*
 	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -431,15 +431,15 @@ static int mnt_make_readonly(struct mount *mnt)
 	 */
 	smp_wmb();
 	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	return ret;
 }
 
 static void __mnt_unmake_readonly(struct mount *mnt)
 {
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	mnt->mnt.mnt_flags &= ~MNT_READONLY;
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 }
 
 int sb_prepare_remount_readonly(struct super_block *sb)
@@ -451,7 +451,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 	if (atomic_long_read(&sb->s_remove_count))
 		return -EBUSY;
 
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
 		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
 			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -473,7 +473,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
 			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 
 	return err;
 }
@@ -522,14 +522,14 @@ struct vfsmount *lookup_mnt(struct path *path)
 {
 	struct mount *child_mnt;
 
-	br_read_lock(vfsmount_lock);
+	br_read_lock(&vfsmount_lock);
 	child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
 	if (child_mnt) {
 		mnt_add_count(child_mnt, 1);
-		br_read_unlock(vfsmount_lock);
+		br_read_unlock(&vfsmount_lock);
 		return &child_mnt->mnt;
 	} else {
-		br_read_unlock(vfsmount_lock);
+		br_read_unlock(&vfsmount_lock);
 		return NULL;
 	}
 }
@@ -714,9 +714,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
 	mnt->mnt.mnt_sb = root->d_sb;
 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	mnt->mnt_parent = mnt;
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	return &mnt->mnt;
 }
 EXPORT_SYMBOL_GPL(vfs_kern_mount);
@@ -745,9 +745,9 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 		mnt->mnt.mnt_root = dget(root);
 		mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 		mnt->mnt_parent = mnt;
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 
 		if (flag & CL_SLAVE) {
 			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
@@ -803,35 +803,36 @@ static void mntput_no_expire(struct mount *mnt)
 {
 put_again:
 #ifdef CONFIG_SMP
-	br_read_lock(vfsmount_lock);
+	br_read_lock(&vfsmount_lock);
 	if (likely(atomic_read(&mnt->mnt_longterm))) {
 		mnt_add_count(mnt, -1);
-		br_read_unlock(vfsmount_lock);
+		br_read_unlock(&vfsmount_lock);
 		return;
 	}
-	br_read_unlock(vfsmount_lock);
+	br_read_unlock(&vfsmount_lock);
 
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	mnt_add_count(mnt, -1);
 	if (mnt_get_count(mnt)) {
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 		return;
 	}
 #else
 	mnt_add_count(mnt, -1);
 	if (likely(mnt_get_count(mnt)))
 		return;
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 #endif
 	if (unlikely(mnt->mnt_pinned)) {
 		mnt_add_count(mnt, mnt->mnt_pinned + 1);
 		mnt->mnt_pinned = 0;
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 		acct_auto_close_mnt(&mnt->mnt);
 		goto put_again;
 	}
+
 	list_del(&mnt->mnt_instance);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	mntfree(mnt);
 }
 
@@ -857,21 +858,21 @@ EXPORT_SYMBOL(mntget);
 
 void mnt_pin(struct vfsmount *mnt)
 {
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	real_mount(mnt)->mnt_pinned++;
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 }
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *m)
 {
 	struct mount *mnt = real_mount(m);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	if (mnt->mnt_pinned) {
 		mnt_add_count(mnt, 1);
 		mnt->mnt_pinned--;
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 }
 EXPORT_SYMBOL(mnt_unpin);
 
@@ -988,12 +989,12 @@ int may_umount_tree(struct vfsmount *m)
 	BUG_ON(!m);
 
 	/* write lock needed for mnt_get_count */
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		actual_refs += mnt_get_count(p);
 		minimum_refs += 2;
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 
 	if (actual_refs > minimum_refs)
 		return 0;
@@ -1020,10 +1021,10 @@ int may_umount(struct vfsmount *mnt)
 {
 	int ret = 1;
 	down_read(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	if (propagate_mount_busy(real_mount(mnt), 2))
 		ret = 0;
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_read(&namespace_sem);
 	return ret;
 }
@@ -1040,13 +1041,13 @@ void release_mounts(struct list_head *head)
 			struct dentry *dentry;
 			struct mount *m;
 
-			br_write_lock(vfsmount_lock);
+			br_write_lock(&vfsmount_lock);
 			dentry = mnt->mnt_mountpoint;
 			m = mnt->mnt_parent;
 			mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 			mnt->mnt_parent = mnt;
 			m->mnt_ghosts--;
-			br_write_unlock(vfsmount_lock);
+			br_write_unlock(&vfsmount_lock);
 			dput(dentry);
 			mntput(&m->mnt);
 		}
@@ -1073,8 +1074,9 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
 		list_del_init(&p->mnt_expire);
 		list_del_init(&p->mnt_list);
 		__touch_mnt_namespace(p->mnt_ns);
+		if (p->mnt_ns)
+			__mnt_make_shortterm(p);
 		p->mnt_ns = NULL;
-		__mnt_make_shortterm(p);
 		list_del_init(&p->mnt_child);
 		if (mnt_has_parent(p)) {
 			p->mnt_parent->mnt_ghosts++;
@@ -1112,12 +1114,12 @@ static int do_umount(struct mount *mnt, int flags)
 		 * probably don't strictly need the lock here if we examined
 		 * all race cases, but it's a slowpath.
 		 */
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		if (mnt_get_count(mnt) != 2) {
-			br_write_unlock(vfsmount_lock);
+			br_write_unlock(&vfsmount_lock);
 			return -EBUSY;
 		}
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 
 		if (!xchg(&mnt->mnt_expiry_mark, 1))
 			return -EAGAIN;
@@ -1159,7 +1161,7 @@ static int do_umount(struct mount *mnt, int flags)
 	}
 
 	down_write(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	event++;
 
 	if (!(flags & MNT_DETACH))
@@ -1171,7 +1173,7 @@ static int do_umount(struct mount *mnt, int flags)
 			umount_tree(mnt, 1, &umount_list);
 		retval = 0;
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_write(&namespace_sem);
 	release_mounts(&umount_list);
 	return retval;
@@ -1286,19 +1288,19 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 			q = clone_mnt(p, p->mnt.mnt_root, flag);
 			if (!q)
 				goto Enomem;
-			br_write_lock(vfsmount_lock);
+			br_write_lock(&vfsmount_lock);
 			list_add_tail(&q->mnt_list, &res->mnt_list);
 			attach_mnt(q, &path);
-			br_write_unlock(vfsmount_lock);
+			br_write_unlock(&vfsmount_lock);
 		}
 	}
 	return res;
 Enomem:
 	if (res) {
 		LIST_HEAD(umount_list);
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		umount_tree(res, 0, &umount_list);
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 		release_mounts(&umount_list);
 	}
 	return NULL;
@@ -1318,9 +1320,9 @@ void drop_collected_mounts(struct vfsmount *mnt)
 {
 	LIST_HEAD(umount_list);
 	down_write(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	umount_tree(real_mount(mnt), 0, &umount_list);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_write(&namespace_sem);
 	release_mounts(&umount_list);
 }
@@ -1448,7 +1450,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 	if (err)
 		goto out_cleanup_ids;
 
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 
 	if (IS_MNT_SHARED(dest_mnt)) {
 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1467,7 +1469,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 		list_del_init(&child->mnt_hash);
 		commit_tree(child);
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 
 	return 0;
 
@@ -1565,10 +1567,10 @@ static int do_change_type(struct path *path, int flag)
 			goto out_unlock;
 	}
 
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
 		change_mnt_propagation(m, type);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 
  out_unlock:
 	up_write(&namespace_sem);
@@ -1617,9 +1619,9 @@ static int do_loopback(struct path *path, char *old_name,
 
 	err = graft_tree(mnt, path);
 	if (err) {
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		umount_tree(mnt, 0, &umount_list);
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 	}
 out2:
 	unlock_mount(path);
@@ -1677,16 +1679,16 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
 	else
 		err = do_remount_sb(sb, flags, data, 0);
 	if (!err) {
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
 		mnt->mnt.mnt_flags = mnt_flags;
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 	}
 	up_write(&sb->s_umount);
 	if (!err) {
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		touch_mnt_namespace(mnt->mnt_ns);
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 	}
 	return err;
 }
@@ -1893,9 +1895,9 @@ fail:
 	/* remove m from any expiration list it may be on */
 	if (!list_empty(&mnt->mnt_expire)) {
 		down_write(&namespace_sem);
-		br_write_lock(vfsmount_lock);
+		br_write_lock(&vfsmount_lock);
 		list_del_init(&mnt->mnt_expire);
-		br_write_unlock(vfsmount_lock);
+		br_write_unlock(&vfsmount_lock);
 		up_write(&namespace_sem);
 	}
 	mntput(m);
@@ -1911,11 +1913,11 @@ fail:
 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
 {
 	down_write(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 
 	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
 
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_write(&namespace_sem);
 }
 EXPORT_SYMBOL(mnt_set_expiry);
@@ -1935,7 +1937,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
 		return;
 
 	down_write(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 
 	/* extract from the expiration list every vfsmount that matches the
 	 * following criteria:
@@ -1954,7 +1956,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
 		touch_mnt_namespace(mnt->mnt_ns);
 		umount_tree(mnt, 1, &umounts);
 	}
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_write(&namespace_sem);
 
 	release_mounts(&umounts);
@@ -2218,9 +2220,9 @@ void mnt_make_shortterm(struct vfsmount *m)
 	struct mount *mnt = real_mount(m);
 	if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
 		return;
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	atomic_dec(&mnt->mnt_longterm);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 #endif
 }
 
@@ -2250,9 +2252,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 		return ERR_PTR(-ENOMEM);
 	}
 	new_ns->root = new;
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	list_add_tail(&new_ns->list, &new->mnt_list);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 
 	/*
 	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2416,9 +2418,9 @@ bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
 int path_is_under(struct path *path1, struct path *path2)
 {
 	int res;
-	br_read_lock(vfsmount_lock);
+	br_read_lock(&vfsmount_lock);
 	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
-	br_read_unlock(vfsmount_lock);
+	br_read_unlock(&vfsmount_lock);
 	return res;
 }
 EXPORT_SYMBOL(path_is_under);
@@ -2505,7 +2507,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 	/* make sure we can reach put_old from new_root */
 	if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new))
 		goto out4;
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	detach_mnt(new_mnt, &parent_path);
 	detach_mnt(root_mnt, &root_parent);
 	/* mount old root on put_old */
@@ -2513,7 +2515,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 	/* mount new_root on / */
 	attach_mnt(new_mnt, &root_parent);
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	chroot_fs_refs(&root, &new);
 	error = 0;
 out4:
@@ -2576,7 +2578,7 @@ void __init mnt_init(void)
 	for (u = 0; u < HASH_SIZE; u++)
 		INIT_LIST_HEAD(&mount_hashtable[u]);
 
-	br_lock_init(vfsmount_lock);
+	br_lock_init(&vfsmount_lock);
 
 	err = sysfs_init();
 	if (err)
@@ -2596,9 +2598,9 @@ void put_mnt_ns(struct mnt_namespace *ns)
 	if (!atomic_dec_and_test(&ns->count))
 		return;
 	down_write(&namespace_sem);
-	br_write_lock(vfsmount_lock);
+	br_write_lock(&vfsmount_lock);
 	umount_tree(ns->root, 0, &umount_list);
-	br_write_unlock(vfsmount_lock);
+	br_write_unlock(&vfsmount_lock);
 	up_write(&namespace_sem);
 	release_mounts(&umount_list);
 	kfree(ns);