author		Miklos Szeredi <mszeredi@suse.cz>	2008-03-27 08:06:23 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2008-04-23 00:04:51 -0400
commit		719f5d7f0b90ac2c8f8ca4232eb322b266fea01e (patch)
tree		ed3f63e0856b8b319764d475e68b35719ac0ccb9 /fs/namespace.c
parent		73cd49ecdde92fdce131938bdaff4993010d181b (diff)
[patch 4/7] vfs: mountinfo: add mount peer group ID
Add a unique ID to each peer group using the IDR infrastructure. The
identifiers are reused after the peer group dissolves.

The IDR structures are protected by holding namespace_sem for write
while allocating or deallocating IDs.

IDs are allocated when a previously unshared vfsmount becomes the
first member of a peer group. When a new member is added to an
existing group, the ID is copied from one of the old members.

IDs are freed when the last member of a peer group is unshared.

Setting the MNT_SHARED flag on members of a subtree is done as a
separate step, after all the IDs have been allocated. This way an
allocation failure can be cleaned up easily, without affecting the
propagation state.

Based on design sketch by Al Viro.

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
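(Background note: mnt_alloc_group_id() in the patch below relies on the
two-step IDA idiom of 2.6-era kernels, where a sleeping preallocation is
followed by the actual ID lookup. Here is a minimal self-contained sketch
of that idiom; the demo_* names are hypothetical, and only ida_pre_get(),
ida_get_new_above() and ida_remove() are real interfaces from
<linux/idr.h> of that period.)

    #include <linux/idr.h>

    static DEFINE_IDA(demo_ida);

    /* Caller must serialize against other allocators (namespace_sem here). */
    static int demo_alloc_id(int *id)
    {
            /* Step 1: preallocate; may sleep, so it is done before any spinlock. */
            if (!ida_pre_get(&demo_ida, GFP_KERNEL))
                    return -ENOMEM;

            /*
             * Step 2: take the lowest free ID >= 1; 0 is left unused so it
             * can mean "not in any peer group".  Without external
             * serialization this step can return -EAGAIN (the preallocated
             * node was consumed by a racing allocator) and would need a
             * retry loop; holding namespace_sem for write rules that out.
             */
            return ida_get_new_above(&demo_ida, 1, id);
    }

    static void demo_free_id(int id)
    {
            ida_remove(&demo_ida, id);      /* the ID becomes reusable at once */
    }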
Diffstat (limited to 'fs/namespace.c')
-rw-r--r--	fs/namespace.c	93
1 file changed, 90 insertions(+), 3 deletions(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index 8ca6317cb401..cefa1d9939b0 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -41,6 +41,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
 
 static int event;
 static DEFINE_IDA(mnt_id_ida);
+static DEFINE_IDA(mnt_group_ida);
 
 static struct list_head *mount_hashtable __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
@@ -83,6 +84,28 @@ static void mnt_free_id(struct vfsmount *mnt)
 	spin_unlock(&vfsmount_lock);
 }
 
+/*
+ * Allocate a new peer group ID
+ *
+ * mnt_group_ida is protected by namespace_sem
+ */
+static int mnt_alloc_group_id(struct vfsmount *mnt)
+{
+	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
+		return -ENOMEM;
+
+	return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
+}
+
+/*
+ * Release a peer group ID
+ */
+void mnt_release_group_id(struct vfsmount *mnt)
+{
+	ida_remove(&mnt_group_ida, mnt->mnt_group_id);
+	mnt->mnt_group_id = 0;
+}
+
 struct vfsmount *alloc_vfsmnt(const char *name)
 {
 	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
@@ -533,6 +556,17 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
 	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
 
 	if (mnt) {
+		if (flag & (CL_SLAVE | CL_PRIVATE))
+			mnt->mnt_group_id = 0; /* not a peer of original */
+		else
+			mnt->mnt_group_id = old->mnt_group_id;
+
+		if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
+			int err = mnt_alloc_group_id(mnt);
+			if (err)
+				goto out_free;
+		}
+
 		mnt->mnt_flags = old->mnt_flags;
 		atomic_inc(&sb->s_active);
 		mnt->mnt_sb = sb;
@@ -562,6 +596,10 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
 		}
 	}
 	return mnt;
+
+ out_free:
+	free_vfsmnt(mnt);
+	return NULL;
 }
 
 static inline void __mntput(struct vfsmount *mnt)
@@ -1142,6 +1180,33 @@ void drop_collected_mounts(struct vfsmount *mnt)
 	release_mounts(&umount_list);
 }
 
+static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
+{
+	struct vfsmount *p;
+
+	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
+		if (p->mnt_group_id && !IS_MNT_SHARED(p))
+			mnt_release_group_id(p);
+	}
+}
+
+static int invent_group_ids(struct vfsmount *mnt, bool recurse)
+{
+	struct vfsmount *p;
+
+	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
+		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
+			int err = mnt_alloc_group_id(p);
+			if (err) {
+				cleanup_group_ids(mnt, p);
+				return err;
+			}
+		}
+	}
+
+	return 0;
+}
+
 /*
  * @source_mnt : mount tree to be attached
  * @nd : place the mount tree @source_mnt is attached
@@ -1212,9 +1277,16 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
 	struct vfsmount *dest_mnt = path->mnt;
 	struct dentry *dest_dentry = path->dentry;
 	struct vfsmount *child, *p;
+	int err;
 
-	if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
-		return -EINVAL;
+	if (IS_MNT_SHARED(dest_mnt)) {
+		err = invent_group_ids(source_mnt, true);
+		if (err)
+			goto out;
+	}
+	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
+	if (err)
+		goto out_cleanup_ids;
 
 	if (IS_MNT_SHARED(dest_mnt)) {
 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1237,6 +1309,12 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
 	}
 	spin_unlock(&vfsmount_lock);
 	return 0;
+
+ out_cleanup_ids:
+	if (IS_MNT_SHARED(dest_mnt))
+		cleanup_group_ids(source_mnt, NULL);
+ out:
+	return err;
 }
 
 static int graft_tree(struct vfsmount *mnt, struct path *path)
@@ -1277,6 +1355,7 @@ static noinline int do_change_type(struct nameidata *nd, int flag)
 	struct vfsmount *m, *mnt = nd->path.mnt;
 	int recurse = flag & MS_REC;
 	int type = flag & ~MS_REC;
+	int err = 0;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -1285,12 +1364,20 @@ static noinline int do_change_type(struct nameidata *nd, int flag)
 		return -EINVAL;
 
 	down_write(&namespace_sem);
+	if (type == MS_SHARED) {
+		err = invent_group_ids(mnt, recurse);
+		if (err)
+			goto out_unlock;
+	}
+
 	spin_lock(&vfsmount_lock);
 	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
 		change_mnt_propagation(m, type);
 	spin_unlock(&vfsmount_lock);
+
+ out_unlock:
 	up_write(&namespace_sem);
-	return 0;
+	return err;
 }
 
 /*
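(Usage note: judging from the rest of this 7-patch mountinfo series, the
peer group ID allocated here appears to be what later patches report in
/proc/<pid>/mountinfo as the shared:N and master:N optional tags, letting
userspace identify which mounts belong to the same peer group.)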