author     Al Viro <viro@zeniv.linux.org.uk>  2013-03-16 15:12:40 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>  2013-04-09 14:12:54 -0400
commit     97216be09efd41414725068212e3af0f05cde11a (patch)
tree       4142569f2f7fb33cf7044fab8caac811d8ce912e /fs/namespace.c
parent     328e6d9014636afc2b3c979403b36faadb412657 (diff)
fold release_mounts() into namespace_unlock()
... and provide namespace_lock() as a trivial wrapper; switch to those two consistently. Result is patterned after rtnl_lock/rtnl_unlock pair.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
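In outline, callers that used to take namespace_sem directly now go through a lock/unlock pair, and namespace_unlock() absorbs the old release_mounts() work: any mount queued on the global 'unmounted' list while the lock was held is torn down only after the lock has been dropped. The sketch below paraphrases the two helpers as they appear in the hunks further down; the per-mount teardown inside the loop is elided and only marked with a comment, so treat this as an outline rather than the exact code:

static LIST_HEAD(unmounted);	/* pending mounts, protected by namespace_sem */

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

static void namespace_unlock(void)
{
	struct mount *mnt;
	LIST_HEAD(head);

	/* fast path: nothing was queued for release while the lock was held */
	if (likely(list_empty(&unmounted))) {
		up_write(&namespace_sem);
		return;
	}

	/* grab the queued mounts, then drop the lock before tearing them down */
	list_splice_init(&unmounted, &head);
	up_write(&namespace_sem);

	while (!list_empty(&head)) {
		mnt = list_first_entry(&head, struct mount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		/* ... per-mount teardown carried over from release_mounts(), not shown here ... */
	}
}

Deferring the actual teardown until after up_write() keeps the release work out of the namespace_sem critical section, which gives the pair the same shape as the rtnl_lock/rtnl_unlock pairing the commit message refers to.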
Diffstat (limited to 'fs/namespace.c')
-rw-r--r--  fs/namespace.c  53
1 file changed, 30 insertions(+), 23 deletions(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index fa93d54d21e8..ed0708f2415f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1121,11 +1121,21 @@ EXPORT_SYMBOL(may_umount);
 
 static LIST_HEAD(unmounted);	/* protected by namespace_sem */
 
-static void release_mounts(struct list_head *head)
+static void namespace_unlock(void)
 {
 	struct mount *mnt;
-	while (!list_empty(head)) {
-		mnt = list_first_entry(head, struct mount, mnt_hash);
+	LIST_HEAD(head);
+
+	if (likely(list_empty(&unmounted))) {
+		up_write(&namespace_sem);
+		return;
+	}
+
+	list_splice_init(&unmounted, &head);
+	up_write(&namespace_sem);
+
+	while (!list_empty(&head)) {
+		mnt = list_first_entry(&head, struct mount, mnt_hash);
 		list_del_init(&mnt->mnt_hash);
 		if (mnt_has_parent(mnt)) {
 			struct dentry *dentry;
@@ -1145,12 +1155,9 @@ static void release_mounts(struct list_head *head)
 	}
 }
 
-static void namespace_unlock(void)
+static inline void namespace_lock(void)
 {
-	LIST_HEAD(head);
-	list_splice_init(&unmounted, &head);
-	up_write(&namespace_sem);
-	release_mounts(&head);
+	down_write(&namespace_sem);
 }
 
 /*
@@ -1256,7 +1263,7 @@ static int do_umount(struct mount *mnt, int flags)
 		return retval;
 	}
 
-	down_write(&namespace_sem);
+	namespace_lock();
 	br_write_lock(&vfsmount_lock);
 	event++;
 
@@ -1412,7 +1419,7 @@ out:
 struct vfsmount *collect_mounts(struct path *path)
 {
 	struct mount *tree;
-	down_write(&namespace_sem);
+	namespace_lock();
 	tree = copy_tree(real_mount(path->mnt), path->dentry,
 			 CL_COPY_ALL | CL_PRIVATE);
 	namespace_unlock();
@@ -1423,7 +1430,7 @@ struct vfsmount *collect_mounts(struct path *path)
 
 void drop_collected_mounts(struct vfsmount *mnt)
 {
-	down_write(&namespace_sem);
+	namespace_lock();
 	br_write_lock(&vfsmount_lock);
 	umount_tree(real_mount(mnt), 0);
 	br_write_unlock(&vfsmount_lock);
@@ -1593,18 +1600,18 @@ retry:
 		mutex_unlock(&dentry->d_inode->i_mutex);
 		return ERR_PTR(-ENOENT);
 	}
-	down_write(&namespace_sem);
+	namespace_lock();
 	mnt = lookup_mnt(path);
 	if (likely(!mnt)) {
 		struct mountpoint *mp = new_mountpoint(dentry);
 		if (IS_ERR(mp)) {
-			up_write(&namespace_sem);
+			namespace_unlock();
 			mutex_unlock(&dentry->d_inode->i_mutex);
 			return mp;
 		}
 		return mp;
 	}
-	up_write(&namespace_sem);
+	namespace_unlock();
 	mutex_unlock(&path->dentry->d_inode->i_mutex);
 	path_put(path);
 	path->mnt = mnt;
@@ -1667,7 +1674,7 @@ static int do_change_type(struct path *path, int flag)
 	if (!type)
 		return -EINVAL;
 
-	down_write(&namespace_sem);
+	namespace_lock();
 	if (type == MS_SHARED) {
 		err = invent_group_ids(mnt, recurse);
 		if (err)
@@ -1680,7 +1687,7 @@ static int do_change_type(struct path *path, int flag)
 	br_write_unlock(&vfsmount_lock);
 
  out_unlock:
-	up_write(&namespace_sem);
+	namespace_unlock();
 	return err;
 }
 
@@ -2016,11 +2023,11 @@ int finish_automount(struct vfsmount *m, struct path *path)
 fail:
 	/* remove m from any expiration list it may be on */
 	if (!list_empty(&mnt->mnt_expire)) {
-		down_write(&namespace_sem);
+		namespace_lock();
 		br_write_lock(&vfsmount_lock);
 		list_del_init(&mnt->mnt_expire);
 		br_write_unlock(&vfsmount_lock);
-		up_write(&namespace_sem);
+		namespace_unlock();
 	}
 	mntput(m);
 	mntput(m);
@@ -2034,13 +2041,13 @@ fail:
  */
 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
 {
-	down_write(&namespace_sem);
+	namespace_lock();
 	br_write_lock(&vfsmount_lock);
 
 	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
 
 	br_write_unlock(&vfsmount_lock);
-	up_write(&namespace_sem);
+	namespace_unlock();
 }
 EXPORT_SYMBOL(mnt_set_expiry);
 
@@ -2057,7 +2064,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
 	if (list_empty(mounts))
 		return;
 
-	down_write(&namespace_sem);
+	namespace_lock();
 	br_write_lock(&vfsmount_lock);
 
 	/* extract from the expiration list every vfsmount that matches the
@@ -2373,7 +2380,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 	if (IS_ERR(new_ns))
 		return new_ns;
 
-	down_write(&namespace_sem);
+	namespace_lock();
 	/* First pass: copy the tree topology */
 	copy_flags = CL_COPY_ALL | CL_EXPIRE;
 	if (user_ns != mnt_ns->user_ns)
@@ -2733,7 +2740,7 @@ void put_mnt_ns(struct mnt_namespace *ns)
 {
 	if (!atomic_dec_and_test(&ns->count))
 		return;
-	down_write(&namespace_sem);
+	namespace_lock();
 	br_write_lock(&vfsmount_lock);
 	umount_tree(ns->root, 0);
 	br_write_unlock(&vfsmount_lock);