about summary refs log tree commit diff stats
path: root/fs/fs_struct.c
diff options
context:
space:
mode:
author: Nick Piggin <npiggin@kernel.dk>  2011-01-07 01:49:53 -0500
committer: Nick Piggin <npiggin@kernel.dk>  2011-01-07 01:50:27 -0500
commit: c28cc36469554dc55540f059fbdc7fa22a2c31fc (patch)
tree: 6b867456be48b8633a2d56a99e00bb3faf9dccc7 /fs/fs_struct.c
parent: 31e6b01f4183ff419a6d1f86177cbf4662347cec (diff)
fs: fs_struct use seqlock
Use a seqlock in the fs_struct to enable us to take an atomic copy of the complete cwd and root paths. Use this in the RCU lookup path to avoid a thread-shared spinlock in RCU lookup operations. Multi-threaded apps may now perform path lookups with scalability matching multi-process apps. Operations such as stat(2) become very scalable for multi-threaded workloads. Signed-off-by: Nick Piggin <npiggin@kernel.dk>
Diffstat (limited to 'fs/fs_struct.c')
-rw-r--r--  fs/fs_struct.c  10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index ed45a9cf5f3d..60b8531f41c5 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -14,9 +14,11 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
 	struct path old_root;
 
 	spin_lock(&fs->lock);
+	write_seqcount_begin(&fs->seq);
 	old_root = fs->root;
 	fs->root = *path;
 	path_get(path);
+	write_seqcount_end(&fs->seq);
 	spin_unlock(&fs->lock);
 	if (old_root.dentry)
 		path_put(&old_root);
@@ -31,9 +33,11 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
 	struct path old_pwd;
 
 	spin_lock(&fs->lock);
+	write_seqcount_begin(&fs->seq);
 	old_pwd = fs->pwd;
 	fs->pwd = *path;
 	path_get(path);
+	write_seqcount_end(&fs->seq);
 	spin_unlock(&fs->lock);
 
 	if (old_pwd.dentry)
@@ -52,6 +56,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
 		fs = p->fs;
 		if (fs) {
 			spin_lock(&fs->lock);
+			write_seqcount_begin(&fs->seq);
 			if (fs->root.dentry == old_root->dentry
 			    && fs->root.mnt == old_root->mnt) {
 				path_get(new_root);
@@ -64,6 +69,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
 				fs->pwd = *new_root;
 				count++;
 			}
+			write_seqcount_end(&fs->seq);
 			spin_unlock(&fs->lock);
 		}
 		task_unlock(p);
@@ -88,8 +94,10 @@ void exit_fs(struct task_struct *tsk)
 		int kill;
 		task_lock(tsk);
 		spin_lock(&fs->lock);
+		write_seqcount_begin(&fs->seq);
 		tsk->fs = NULL;
 		kill = !--fs->users;
+		write_seqcount_end(&fs->seq);
 		spin_unlock(&fs->lock);
 		task_unlock(tsk);
 		if (kill)
@@ -105,6 +113,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
 		fs->users = 1;
 		fs->in_exec = 0;
 		spin_lock_init(&fs->lock);
+		seqcount_init(&fs->seq);
 		fs->umask = old->umask;
 		get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
 	}
@@ -144,6 +153,7 @@ EXPORT_SYMBOL(current_umask);
 struct fs_struct init_fs = {
 	.users = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
+	.seq = SEQCNT_ZERO,
 	.umask = 0022,
 };
 