Diffstat (limited to 'fs/namespace.c')
 -rw-r--r--   fs/namespace.c | 56
 1 file changed, 35 insertions(+), 21 deletions(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index 2ffc5a2905d4..182bc41cd887 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -52,7 +52,7 @@ static int __init set_mphash_entries(char *str)
 }
 __setup("mphash_entries=", set_mphash_entries);
 
-static int event;
+static u64 event;
 static DEFINE_IDA(mnt_id_ida);
 static DEFINE_IDA(mnt_group_ida);
 static DEFINE_SPINLOCK(mnt_id_lock);
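The mount event counter, which /proc/mounts polling and the new m_start() caching further down compare against, is widened from int to u64, presumably so that comparison cannot be fooled by a wrapped 32-bit counter. The matching field lives in struct mnt_namespace in fs/mount.h, outside this per-file diffstat; a hedged sketch of what that companion change would look like (other fields abbreviated):

/* fs/mount.h -- assumed companion change, not shown in this diff */
struct mnt_namespace {
        /* ... refcount, root mount, mount list, user namespace, seq ... */
        wait_queue_head_t poll;
        u64 event;              /* was: int event */
};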
@@ -414,9 +414,7 @@ EXPORT_SYMBOL_GPL(mnt_clone_write);
  */
 int __mnt_want_write_file(struct file *file)
 {
-        struct inode *inode = file_inode(file);
-
-        if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
+        if (!(file->f_mode & FMODE_WRITER))
                 return __mnt_want_write(file->f_path.mnt);
         else
                 return mnt_clone_write(file->f_path.mnt);
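FMODE_WRITER is set at open time when the VFS has already taken write access on the mount for this file, so the single-flag test above replaces the old FMODE_WRITE/special_file() check: a fresh __mnt_want_write() is only needed when the open does not already hold write access, otherwise the existing count is piggybacked via mnt_clone_write(). A hedged usage sketch of the callers this serves; example_setflags() is invented for illustration and error handling is minimal:

/* the *_file helpers wrap __mnt_want_write_file() above */
static int example_setflags(struct file *file, unsigned int flags)
{
        int err = mnt_want_write_file(file);
        if (err)
                return err;

        /* ... modify the inode backing 'file' here ... */

        mnt_drop_write_file(file);
        return 0;
}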
@@ -570,13 +568,17 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 static void free_vfsmnt(struct mount *mnt)
 {
         kfree(mnt->mnt_devname);
-        mnt_free_id(mnt);
 #ifdef CONFIG_SMP
         free_percpu(mnt->mnt_pcp);
 #endif
         kmem_cache_free(mnt_cache, mnt);
 }
 
+static void delayed_free_vfsmnt(struct rcu_head *head)
+{
+        free_vfsmnt(container_of(head, struct mount, mnt_rcu));
+}
+
 /* call under rcu_read_lock */
 bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
 {
@@ -848,6 +850,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
 
         root = mount_fs(type, flags, name, data);
         if (IS_ERR(root)) {
+                mnt_free_id(mnt);
                 free_vfsmnt(mnt);
                 return ERR_CAST(root);
         }
@@ -885,7 +888,7 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
                 goto out_free;
         }
 
-        mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
+        mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
         /* Don't allow unprivileged users to change mount flags */
         if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
                 mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
@@ -928,20 +931,11 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
         return mnt;
 
  out_free:
+        mnt_free_id(mnt);
         free_vfsmnt(mnt);
         return ERR_PTR(err);
 }
 
-static void delayed_free(struct rcu_head *head)
-{
-        struct mount *mnt = container_of(head, struct mount, mnt_rcu);
-        kfree(mnt->mnt_devname);
-#ifdef CONFIG_SMP
-        free_percpu(mnt->mnt_pcp);
-#endif
-        kmem_cache_free(mnt_cache, mnt);
-}
-
 static void mntput_no_expire(struct mount *mnt)
 {
 put_again:
@@ -991,7 +985,7 @@ put_again:
         dput(mnt->mnt.mnt_root);
         deactivate_super(mnt->mnt.mnt_sb);
         mnt_free_id(mnt);
-        call_rcu(&mnt->mnt_rcu, delayed_free);
+        call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
 }
 
 void mntput(struct vfsmount *mnt)
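The RCU-deferred teardown no longer duplicates free_vfsmnt(): mntput_no_expire() frees the mount id immediately, then queues delayed_free_vfsmnt(), which recovers the struct mount from its embedded rcu_head and calls the ordinary free_vfsmnt(); hence mnt_free_id() moving out of free_vfsmnt() and into the error paths in the earlier hunks. A minimal, self-contained sketch of the same call_rcu()/container_of() pattern, with made-up names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {                          /* illustrative object, not kernel code */
        int value;
        struct rcu_head rcu;          /* embedded callback handle */
};

static void foo_free_rcu(struct rcu_head *head)
{
        /* recover the enclosing object from its rcu_head and free it */
        kfree(container_of(head, struct foo, rcu));
}

static void foo_release(struct foo *f)
{
        /* readers may still hold f under rcu_read_lock(); defer the
         * actual kfree() until a grace period has elapsed */
        call_rcu(&f->rcu, foo_free_rcu);
}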
@@ -1100,14 +1094,29 @@ static void *m_start(struct seq_file *m, loff_t *pos)
         struct proc_mounts *p = proc_mounts(m);
 
         down_read(&namespace_sem);
-        return seq_list_start(&p->ns->list, *pos);
+        if (p->cached_event == p->ns->event) {
+                void *v = p->cached_mount;
+                if (*pos == p->cached_index)
+                        return v;
+                if (*pos == p->cached_index + 1) {
+                        v = seq_list_next(v, &p->ns->list, &p->cached_index);
+                        return p->cached_mount = v;
+                }
+        }
+
+        p->cached_event = p->ns->event;
+        p->cached_mount = seq_list_start(&p->ns->list, *pos);
+        p->cached_index = *pos;
+        return p->cached_mount;
 }
 
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
         struct proc_mounts *p = proc_mounts(m);
 
-        return seq_list_next(v, &p->ns->list, pos);
+        p->cached_mount = seq_list_next(v, &p->ns->list, pos);
+        p->cached_index = *pos;
+        return p->cached_mount;
 }
 
 static void m_stop(struct seq_file *m, void *v)
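/proc/mounts is read in chunks, and every read(2) previously restarted the walk from the head of ns->list, so dumping a namespace with many mounts was quadratic. m_start() now resumes from a cached cursor when *pos is at, or one past, the previously returned index and no mount event has invalidated it; m_next() keeps the cursor up to date. The fields backing the cache belong to struct proc_mounts in fs/mount.h, which this per-file diff does not show; a hedged sketch of the assumed companion change:

/* fs/mount.h -- assumed shape; field order and surrounding members
 * may differ */
struct proc_mounts {
        struct seq_file m;
        struct mnt_namespace *ns;
        struct path root;
        int (*show)(struct seq_file *, struct vfsmount *);
        void *cached_mount;     /* list position handed out last time */
        u64 cached_event;       /* ns->event when cached_mount was valid */
        loff_t cached_index;    /* *pos that cached_mount corresponds to */
};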
@@ -1661,9 +1670,9 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                 if (err)
                         goto out;
                 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
+                lock_mount_hash();
                 if (err)
                         goto out_cleanup_ids;
-                lock_mount_hash();
                 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
                         set_mnt_shared(p);
         } else {
@@ -1690,6 +1699,11 @@ static int attach_recursive_mnt(struct mount *source_mnt,
         return 0;
 
  out_cleanup_ids:
+        while (!hlist_empty(&tree_list)) {
+                child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+                umount_tree(child, 0);
+        }
+        unlock_mount_hash();
         cleanup_group_ids(source_mnt, NULL);
 out:
         return err;
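propagate_mnt() hashes each propagated copy onto tree_list as it creates it, so a failure partway through leaves already-created copies that must be torn down; that is what the new out_cleanup_ids body does, and why lock_mount_hash() is now taken before the error check in the previous hunk, so the unwind runs under the hash lock. A commented restatement of the drain idiom, schematic and using the names from the hunk above:

/* umount_tree() unhashes 'child' (and everything propagated with it)
 * from tree_list, which is what makes this loop terminate */
while (!hlist_empty(&tree_list)) {
        child = hlist_entry(tree_list.first, struct mount, mnt_hash);
        umount_tree(child, 0);
}
unlock_mount_hash();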
@@ -2044,7 +2058,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
         struct mount *parent;
         int err;
 
-        mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT);
+        mnt_flags &= ~MNT_INTERNAL_FLAGS;
 
         mp = lock_mount(path);
         if (IS_ERR(mp))
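The open-coded mask of kernel-internal mount flags is folded into a single MNT_INTERNAL_FLAGS macro, presumably defined next to the other MNT_* flags in include/linux/mount.h. A hedged sketch of the assumed definition; the real macro may also cover newer internal flags such as MNT_MARKED or MNT_LOCKED:

/* include/linux/mount.h -- assumed definition, exact flag set unverified */
#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
                            MNT_DOOMED | MNT_SYNC_UMOUNT)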