author	Al Viro <viro@zeniv.linux.org.uk>	2013-09-29 11:24:49 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-10-24 23:34:59 -0400
commit	719ea2fbb553ab3f61a174a4b5861289dcc46cb1 (patch)
tree	00c1e2c71caf2338cfe27f2854cf331515b4f411 /fs/namespace.c
parent	aab407fc5c0ce63e9fd4a34a790d7290d3e116a1 (diff)
new helpers: lock_mount_hash/unlock_mount_hash
aka br_write_{lock,unlock} of vfsmount_lock. Inlines in fs/mount.h, vfsmount_lock extern moved over there as well.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
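The helpers themselves live in fs/mount.h rather than in this file, so they do not appear in the diff below. Per the description above they are thin inlines over the br-lock calls they replace; a minimal sketch (the exact form of the moved vfsmount_lock extern is assumed, not shown in this diff):

	/* fs/mount.h (sketch, per the commit message; not part of this diff) */
	extern struct lglock vfsmount_lock;	/* extern moved here, per the message above */

	static inline void lock_mount_hash(void)
	{
		br_write_lock(&vfsmount_lock);	/* same big-reader write side as before */
	}

	static inline void unlock_mount_hash(void)
	{
		br_write_unlock(&vfsmount_lock);
	}

Call sites in fs/namespace.c are then converted 1:1, as the hunks below show.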
Diffstat (limited to 'fs/namespace.c')
-rw-r--r--	fs/namespace.c	96
1 file changed, 48 insertions(+), 48 deletions(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index 846ea43ab0c6..5cbe8cefadb5 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -456,7 +456,7 @@ static int mnt_make_readonly(struct mount *mnt)
 {
 	int ret = 0;
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
 	/*
 	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -490,15 +490,15 @@ static int mnt_make_readonly(struct mount *mnt)
 	 */
 	smp_wmb();
 	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	return ret;
 }
 
 static void __mnt_unmake_readonly(struct mount *mnt)
 {
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	mnt->mnt.mnt_flags &= ~MNT_READONLY;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 }
 
 int sb_prepare_remount_readonly(struct super_block *sb)
@@ -510,7 +510,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 	if (atomic_long_read(&sb->s_remove_count))
 		return -EBUSY;
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
 		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
 			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -532,7 +532,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
 			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	return err;
 }
@@ -794,9 +794,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
 	mnt->mnt.mnt_sb = root->d_sb;
 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	mnt->mnt_parent = mnt;
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	return &mnt->mnt;
 }
 EXPORT_SYMBOL_GPL(vfs_kern_mount);
@@ -837,9 +837,9 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 	mnt->mnt.mnt_root = dget(root);
 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	mnt->mnt_parent = mnt;
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	if ((flag & CL_SLAVE) ||
 	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
@@ -883,28 +883,28 @@ put_again:
 	}
 	br_read_unlock(&vfsmount_lock);
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	mnt_add_count(mnt, -1);
 	if (mnt_get_count(mnt)) {
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 		return;
 	}
 #else
 	mnt_add_count(mnt, -1);
 	if (likely(mnt_get_count(mnt)))
 		return;
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 #endif
 	if (unlikely(mnt->mnt_pinned)) {
 		mnt_add_count(mnt, mnt->mnt_pinned + 1);
 		mnt->mnt_pinned = 0;
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 		acct_auto_close_mnt(&mnt->mnt);
 		goto put_again;
 	}
 
 	list_del(&mnt->mnt_instance);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	/*
 	 * This probably indicates that somebody messed
@@ -945,21 +945,21 @@ EXPORT_SYMBOL(mntget);
 
 void mnt_pin(struct vfsmount *mnt)
 {
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	real_mount(mnt)->mnt_pinned++;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 }
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *m)
 {
 	struct mount *mnt = real_mount(m);
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	if (mnt->mnt_pinned) {
 		mnt_add_count(mnt, 1);
 		mnt->mnt_pinned--;
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 }
 EXPORT_SYMBOL(mnt_unpin);
 
@@ -1076,12 +1076,12 @@ int may_umount_tree(struct vfsmount *m)
 	BUG_ON(!m);
 
 	/* write lock needed for mnt_get_count */
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		actual_refs += mnt_get_count(p);
 		minimum_refs += 2;
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	if (actual_refs > minimum_refs)
 		return 0;
@@ -1108,10 +1108,10 @@ int may_umount(struct vfsmount *mnt)
 {
 	int ret = 1;
 	down_read(&namespace_sem);
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	if (propagate_mount_busy(real_mount(mnt), 2))
 		ret = 0;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	up_read(&namespace_sem);
 	return ret;
 }
@@ -1208,12 +1208,12 @@ static int do_umount(struct mount *mnt, int flags)
 		 * probably don't strictly need the lock here if we examined
 		 * all race cases, but it's a slowpath.
 		 */
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		if (mnt_get_count(mnt) != 2) {
-			br_write_unlock(&vfsmount_lock);
+			unlock_mount_hash();
 			return -EBUSY;
 		}
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 
 		if (!xchg(&mnt->mnt_expiry_mark, 1))
 			return -EAGAIN;
@@ -1255,7 +1255,7 @@ static int do_umount(struct mount *mnt, int flags)
 	}
 
 	namespace_lock();
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	event++;
 
 	if (!(flags & MNT_DETACH))
@@ -1267,7 +1267,7 @@ static int do_umount(struct mount *mnt, int flags)
 			umount_tree(mnt, 1);
 		retval = 0;
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	namespace_unlock();
 	return retval;
 }
@@ -1410,18 +1410,18 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 			q = clone_mnt(p, p->mnt.mnt_root, flag);
 			if (IS_ERR(q))
 				goto out;
-			br_write_lock(&vfsmount_lock);
+			lock_mount_hash();
 			list_add_tail(&q->mnt_list, &res->mnt_list);
 			attach_mnt(q, parent, p->mnt_mp);
-			br_write_unlock(&vfsmount_lock);
+			unlock_mount_hash();
 		}
 	}
 	return res;
 out:
 	if (res) {
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		umount_tree(res, 0);
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 	}
 	return q;
 }
@@ -1443,9 +1443,9 @@ struct vfsmount *collect_mounts(struct path *path)
 void drop_collected_mounts(struct vfsmount *mnt)
 {
 	namespace_lock();
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	umount_tree(real_mount(mnt), 0);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	namespace_unlock();
 }
 
@@ -1572,7 +1572,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 	if (err)
 		goto out_cleanup_ids;
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 
 	if (IS_MNT_SHARED(dest_mnt)) {
 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1591,7 +1591,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 		list_del_init(&child->mnt_hash);
 		commit_tree(child);
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	return 0;
 
@@ -1693,10 +1693,10 @@ static int do_change_type(struct path *path, int flag)
 			goto out_unlock;
 	}
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
 		change_mnt_propagation(m, type);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
  out_unlock:
 	namespace_unlock();
@@ -1768,9 +1768,9 @@ static int do_loopback(struct path *path, const char *old_name,
 
 	err = graft_tree(mnt, parent, mp);
 	if (err) {
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		umount_tree(mnt, 0);
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 	}
 out2:
 	unlock_mount(mp);
@@ -1829,11 +1829,11 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
 	else
 		err = do_remount_sb(sb, flags, data, 0);
 	if (!err) {
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
 		mnt->mnt.mnt_flags = mnt_flags;
 		touch_mnt_namespace(mnt->mnt_ns);
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 	}
 	up_write(&sb->s_umount);
 	return err;
@@ -2093,7 +2093,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
 		return;
 
 	namespace_lock();
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 
 	/* extract from the expiration list every vfsmount that matches the
 	 * following criteria:
@@ -2112,7 +2112,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
 		touch_mnt_namespace(mnt->mnt_ns);
 		umount_tree(mnt, 1);
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	namespace_unlock();
 }
 
@@ -2662,7 +2662,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 	if (!is_path_reachable(old_mnt, old.dentry, &new))
 		goto out4;
 	root_mp->m_count++; /* pin it so it won't go away */
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	detach_mnt(new_mnt, &parent_path);
 	detach_mnt(root_mnt, &root_parent);
 	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
@@ -2674,7 +2674,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 	/* mount new_root on / */
 	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	chroot_fs_refs(&root, &new);
 	put_mountpoint(root_mp);
 	error = 0;
@@ -2784,9 +2784,9 @@ void kern_unmount(struct vfsmount *mnt)
 {
 	/* release long term mount so mount point can be released */
 	if (!IS_ERR_OR_NULL(mnt)) {
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		real_mount(mnt)->mnt_ns = NULL;
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 		mntput(mnt);
 	}
 }