commit     12a5b5294cb1896e9a3c9fca8ff5a7e3def4e8c6
tree       9a3780d90c484e1a18e37b980d3d6cf2ac766711
parent     60bb45297f7551833346c5cebc6d483ea17ea5f2
author     Al Viro <viro@zeniv.linux.org.uk>  2014-08-10 03:44:55 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>  2014-08-11 12:28:10 -0400
fix copy_tree() regression
Since 3.14 we had copy_tree() get the shadowing wrong - if we had one
vfsmount shadowing another (i.e. if A is a slave of B, C is mounted
on A/foo, then D got mounted on B/foo creating D' on A/foo shadowed
by C), copy_tree() of A would make a copy of D' shadow the copy of
C, not the other way around.
It's easy to fix, fortunately - just make sure that mount follows
the one that shadows it in mnt_child as well as in mnt_hash, and when
copy_tree() decides to attach a new mount, check if the last child
it has added to the same parent should be shadowing the new one.
And if it should, just use the same logic commit_tree() has - put the
new mount into the hash and children lists right after the one that
should shadow it.
Cc: stable@vger.kernel.org [3.14 and later]
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--	fs/namespace.c	31
1 file changed, 24 insertions, 7 deletions
diff --git a/fs/namespace.c b/fs/namespace.c
index 65af9d0e0d67..be3f6f23a47d 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -778,6 +778,20 @@ static void attach_mnt(struct mount *mnt,
 	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
 }
 
+static void attach_shadowed(struct mount *mnt,
+			struct mount *parent,
+			struct mount *shadows)
+{
+	if (shadows) {
+		hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
+		list_add(&mnt->mnt_child, &shadows->mnt_child);
+	} else {
+		hlist_add_head_rcu(&mnt->mnt_hash,
+				m_hash(&parent->mnt, mnt->mnt_mountpoint));
+		list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+	}
+}
+
 /*
  * vfsmount lock must be held for write
  */
@@ -796,12 +810,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
 
 	list_splice(&head, n->list.prev);
 
-	if (shadows)
-		hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
-	else
-		hlist_add_head_rcu(&mnt->mnt_hash,
-			m_hash(&parent->mnt, mnt->mnt_mountpoint));
-	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+	attach_shadowed(mnt, parent, shadows);
 	touch_mnt_namespace(n);
 }
 
@@ -1474,6 +1483,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 			continue;
 
 		for (s = r; s; s = next_mnt(s, r)) {
+			struct mount *t = NULL;
 			if (!(flag & CL_COPY_UNBINDABLE) &&
 			    IS_MNT_UNBINDABLE(s)) {
 				s = skip_mnt_tree(s);
@@ -1495,7 +1505,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 				goto out;
 			lock_mount_hash();
 			list_add_tail(&q->mnt_list, &res->mnt_list);
-			attach_mnt(q, parent, p->mnt_mp);
+			mnt_set_mountpoint(parent, p->mnt_mp, q);
+			if (!list_empty(&parent->mnt_mounts)) {
+				t = list_last_entry(&parent->mnt_mounts,
+						struct mount, mnt_child);
+				if (t->mnt_mp != p->mnt_mp)
+					t = NULL;
+			}
+			attach_shadowed(q, parent, t);
 			unlock_mount_hash();
 		}
 	}
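To make the ordering rule concrete, here is a minimal, self-contained userspace sketch of the "put the new entry right after the one that shadows it, otherwise append at the tail" idea that attach_shadowed() applies to parent->mnt_mounts. The struct node and insert_shadowed names are hypothetical, not kernel code; the real helper does the equivalent with list_add()/list_add_tail() on mnt_child and the hlist helpers on mnt_hash, under the vfsmount lock.

#include <stdio.h>

/* Hypothetical stand-in for the mnt_child linkage of struct mount. */
struct node {
	const char *name;
	struct node *next;
};

/*
 * Insert @n right after @shadows when a shadowing entry exists,
 * otherwise append it at the tail of the list - the same ordering
 * rule attach_shadowed() applies to parent->mnt_mounts.
 */
static void insert_shadowed(struct node **head, struct node *n,
			    struct node *shadows)
{
	if (shadows) {
		n->next = shadows->next;
		shadows->next = n;
	} else {
		struct node **p = head;

		while (*p)
			p = &(*p)->next;
		n->next = NULL;
		*p = n;
	}
}

int main(void)
{
	struct node *head = NULL;
	struct node c = { "copy of C", NULL };
	struct node d = { "copy of D'", NULL };

	/* C is copied first; the copy of D' is then placed right after it,
	 * so C's copy stays in front and keeps shadowing the copy of D'. */
	insert_shadowed(&head, &c, NULL);
	insert_shadowed(&head, &d, &c);

	for (struct node *p = head; p; p = p->next)
		printf("%s\n", p->name);
	return 0;
}

Running this prints "copy of C" before "copy of D'", i.e. the copy of C remains the visible (shadowing) entry - the order the commit message describes, and the opposite of what the broken copy_tree() produced since 3.14.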