path: root/fs/namespace.c
author    Al Viro <viro@zeniv.linux.org.uk>    2011-11-25 03:06:56 -0500
committer Al Viro <viro@zeniv.linux.org.uk>    2012-01-03 22:57:12 -0500
commit    909b0a88ef2dc86bd5d2223edf48eb30c865cb69
tree      b30a115b04e93433a8a9360d9cea0c9ec52cb484 /fs/namespace.c
parent    c63181e6b6df89176b3984c6977bb5ec03d0df23
vfs: spread struct mount - remaining argument of next_mnt()
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
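
[Note: this commit completes the conversion of next_mnt(): the root argument becomes struct mount *, matching the cursor argument, so callers no longer have to reach into the embedded vfsmount. The call-site change is mechanical; an illustrative before/after, paraphrasing the loops in this diff:

    /* before: the root had to be passed as the embedded struct vfsmount */
    for (p = mnt; p; p = next_mnt(p, &mnt->mnt))
            change_mnt_propagation(p, type);

    /* after: both arguments are struct mount, no &...->mnt indirection */
    for (p = mnt; p; p = next_mnt(p, mnt))
            change_mnt_propagation(p, type);
]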
Diffstat (limited to 'fs/namespace.c')
-rw-r--r--  fs/namespace.c | 35
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index 124a12555fe4..24e845671ad3 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -631,12 +631,12 @@ static void commit_tree(struct mount *mnt)
 	touch_mnt_namespace(n);
 }
 
-static struct mount *next_mnt(struct mount *p, struct vfsmount *root)
+static struct mount *next_mnt(struct mount *p, struct mount *root)
 {
 	struct list_head *next = p->mnt_mounts.next;
 	if (next == &p->mnt_mounts) {
 		while (1) {
-			if (&p->mnt == root)
+			if (p == root)
 				return NULL;
 			next = p->mnt_child.next;
 			if (next != &p->mnt_parent->mnt_mounts)
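
[Note: next_mnt() walks the mount tree depth-first: descend into the first child if there is one, otherwise advance to the next sibling, otherwise climb toward root. The standalone sketch below mirrors that logic outside the kernel; the minimal list_head helpers and struct mnt are stand-ins for <linux/list.h> and struct mount, for illustration only:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
            n->prev = h->prev; n->next = h;
            h->prev->next = n; h->prev = n;
    }

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mnt {
            struct mnt *mnt_parent;
            struct list_head mnt_mounts;   /* list of children */
            struct list_head mnt_child;    /* link in parent's mnt_mounts */
            const char *name;
    };

    /* Same shape as next_mnt() after this commit: both arguments are the
     * internal tree type, so no &p->mnt indirection is needed anywhere. */
    static struct mnt *next_mnt(struct mnt *p, struct mnt *root)
    {
            struct list_head *next = p->mnt_mounts.next;
            if (next == &p->mnt_mounts) {          /* no children */
                    while (1) {
                            if (p == root)
                                    return NULL;   /* traversal done */
                            next = p->mnt_child.next;
                            if (next != &p->mnt_parent->mnt_mounts)
                                    break;         /* next sibling exists */
                            p = p->mnt_parent;     /* climb and retry */
                    }
            }
            return container_of(next, struct mnt, mnt_child);
    }

    static void attach(struct mnt *child, struct mnt *parent, const char *name)
    {
            child->name = name;
            child->mnt_parent = parent;
            list_init(&child->mnt_mounts);
            list_add_tail(&child->mnt_child, &parent->mnt_mounts);
    }

    int main(void)
    {
            struct mnt root = { .name = "/" }, a, b, c;
            root.mnt_parent = &root;        /* root is its own parent */
            list_init(&root.mnt_mounts);
            attach(&a, &root, "/a");
            attach(&b, &root, "/b");
            attach(&c, &a, "/a/c");

            /* the canonical caller pattern seen throughout fs/namespace.c */
            for (struct mnt *p = &root; p; p = next_mnt(p, &root))
                    printf("%s\n", p->name);  /* "/", "/a", "/a/c", "/b" */
            return 0;
    }
]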
@@ -1145,16 +1145,17 @@ const struct seq_operations mountstats_op = {
  * open files, pwds, chroots or sub mounts that are
  * busy.
  */
-int may_umount_tree(struct vfsmount *mnt)
+int may_umount_tree(struct vfsmount *m)
 {
+	struct mount *mnt = real_mount(m);
 	int actual_refs = 0;
 	int minimum_refs = 0;
 	struct mount *p;
-	BUG_ON(!mnt);
+	BUG_ON(!m);
 
 	/* write lock needed for mnt_get_count */
 	br_write_lock(vfsmount_lock);
-	for (p = real_mount(mnt); p; p = next_mnt(p, mnt)) {
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		actual_refs += mnt_get_count(p);
 		minimum_refs += 2;
 	}
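
[Note: real_mount(), introduced earlier in this series, maps the public struct vfsmount back to its containing struct mount, which lets the conversion happen once at the exported-API boundary instead of inside the loop. A sketch of the conversion, assuming vfsmount is embedded as the mnt member of struct mount (as the uses of &p->mnt elsewhere in this file suggest):

    static inline struct mount *real_mount(struct vfsmount *m)
    {
            return container_of(m, struct mount, mnt);
    }
]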
@@ -1228,7 +1229,7 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
 	LIST_HEAD(tmp_list);
 	struct mount *p;
 
-	for (p = mnt; p; p = next_mnt(p, &mnt->mnt))
+	for (p = mnt; p; p = next_mnt(p, mnt))
 		list_move(&p->mnt_hash, &tmp_list);
 
 	if (propagate)
@@ -1436,7 +1437,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 		if (!is_subdir(r->mnt_mountpoint, dentry))
 			continue;
 
-		for (s = r; s; s = next_mnt(s, &r->mnt)) {
+		for (s = r; s; s = next_mnt(s, r)) {
 			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
 				s = skip_mnt_tree(s);
 				continue;
@@ -1509,7 +1510,7 @@ static void cleanup_group_ids(struct mount *mnt, struct mount *end)
 {
 	struct mount *p;
 
-	for (p = mnt; p != end; p = next_mnt(p, &mnt->mnt)) {
+	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
 		if (p->mnt_group_id && !IS_MNT_SHARED(p))
 			mnt_release_group_id(p);
 	}
@@ -1519,7 +1520,7 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
 {
 	struct mount *p;
 
-	for (p = mnt; p; p = recurse ? next_mnt(p, &mnt->mnt) : NULL) {
+	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
 		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
 			int err = mnt_alloc_group_id(p);
 			if (err) {
@@ -1616,7 +1617,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 	br_write_lock(vfsmount_lock);
 
 	if (IS_MNT_SHARED(dest_mnt)) {
-		for (p = source_mnt; p; p = next_mnt(p, &source_mnt->mnt))
+		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
 			set_mnt_shared(p);
 	}
 	if (parent_path) {
@@ -1731,7 +1732,7 @@ static int do_change_type(struct path *path, int flag)
 	}
 
 	br_write_lock(vfsmount_lock);
-	for (m = mnt; m; m = (recurse ? next_mnt(m, &mnt->mnt) : NULL))
+	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
 		change_mnt_propagation(m, type);
 	br_write_unlock(vfsmount_lock);
 
@@ -1859,7 +1860,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
 static inline int tree_contains_unbindable(struct mount *mnt)
 {
 	struct mount *p;
-	for (p = mnt; p; p = next_mnt(p, &mnt->mnt)) {
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		if (IS_MNT_UNBINDABLE(p))
 			return 1;
 	}
@@ -2399,6 +2400,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 	struct mnt_namespace *new_ns;
 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
 	struct mount *p, *q;
+	struct mount *old = real_mount(mnt_ns->root);
 	struct mount *new;
 
 	new_ns = alloc_mnt_ns();
@@ -2407,8 +2409,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 
 	down_write(&namespace_sem);
 	/* First pass: copy the tree topology */
-	new = copy_tree(real_mount(mnt_ns->root), mnt_ns->root->mnt_root,
-				CL_COPY_ALL | CL_EXPIRE);
+	new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE);
 	if (!new) {
 		up_write(&namespace_sem);
 		kfree(new_ns);
@@ -2424,7 +2425,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 	 * as belonging to new namespace. We have already acquired a private
 	 * fs_struct, so tsk->fs->lock is not needed.
 	 */
-	p = real_mount(mnt_ns->root);
+	p = old;
 	q = new;
 	while (p) {
 		q->mnt_ns = new_ns;
@@ -2443,8 +2444,8 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 				pwdmnt = &p->mnt;
 			}
 		}
-		p = next_mnt(p, mnt_ns->root);
-		q = next_mnt(q, new_ns->root);
+		p = next_mnt(p, old);
+		q = next_mnt(q, new);
 	}
 	up_write(&namespace_sem);
 
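
[Note: with both trees typed as struct mount, the lockstep walk in dup_mnt_ns() becomes symmetric: the cursor and root of each traversal use the same type on both sides. The schematic pattern, condensed from the hunks above:

    p = old;        /* root of the namespace being copied */
    q = new;        /* root of the copy */
    while (p) {
            /* ... transfer per-mount state from p to q ... */
            p = next_mnt(p, old);
            q = next_mnt(q, new);
    }

Because copy_tree() reproduces the source topology node for node, the two iterators visit corresponding mounts in the same order.]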