author	Nick Piggin <npiggin@kernel.dk>	2010-08-17 14:37:33 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2010-08-18 08:35:46 -0400
commit	2a4419b5b2a77f3f4537c14f7ad7df95770655dd (patch)
tree	ad66519a92b995920ecada788e4a08e265747545 /fs
parent	44672e4fbd40e2dda8bbce7d0f71d24dbfc7e00e (diff)
fs: fs_struct rwlock to spinlock
fs: fs_struct rwlock to spinlock

struct fs_struct.lock is an rwlock with the read side used to protect root and pwd members while taking references to them. Taking a reference to a path typically requires just 2 atomic ops, so the critical section is very small. Parallel read-side operations would have cacheline contention on the lock, the dentry, and the vfsmount cachelines, so the rwlock is unlikely to ever give a real parallelism increase.

Replace it with a spinlock to avoid one or two atomic operations in the typical path lookup fastpath.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
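For context, the read-side pattern the message describes looks roughly like the sketch below. This is illustrative only, not part of the diff: it assumes the 2.6.36-era shape of the path_get() and get_fs_root() helpers in fs/namei.c and fs/fs_struct.c.

/* Sketch for illustration -- assumed helper shapes, not part of this patch. */
void path_get(struct path *path)
{
	mntget(path->mnt);	/* atomic ref on the vfsmount */
	dget(path->dentry);	/* atomic ref on the dentry */
}

void get_fs_root(struct fs_struct *fs, struct path *root)
{
	spin_lock(&fs->lock);	/* previously read_lock(&fs->lock) */
	*root = fs->root;
	path_get(root);		/* the "2 atomic ops" the message refers to */
	spin_unlock(&fs->lock);
}

With a critical section this short, the read_lock()/read_unlock() pair is itself atomic traffic on the shared lock word, so the rwlock's reader concurrency cannot pay for its extra cost; the plain spinlock is cheaper on the common path.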
Diffstat (limited to 'fs')
-rw-r--r--	fs/exec.c	4
-rw-r--r--	fs/fs_struct.c	32
2 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
index 7761837e4500..5adab2c93eca 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1117,7 +1117,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
 	bprm->unsafe = tracehook_unsafe_exec(p);
 
 	n_fs = 1;
-	write_lock(&p->fs->lock);
+	spin_lock(&p->fs->lock);
 	rcu_read_lock();
 	for (t = next_thread(p); t != p; t = next_thread(t)) {
 		if (t->fs == p->fs)
@@ -1134,7 +1134,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
 			res = 1;
 		}
 	}
-	write_unlock(&p->fs->lock);
+	spin_unlock(&p->fs->lock);
 
 	return res;
 }
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index 1ee40eb9a2c0..ed45a9cf5f3d 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -13,11 +13,11 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
 {
 	struct path old_root;
 
-	write_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	old_root = fs->root;
 	fs->root = *path;
 	path_get(path);
-	write_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 	if (old_root.dentry)
 		path_put(&old_root);
 }
@@ -30,11 +30,11 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
 {
 	struct path old_pwd;
 
-	write_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	old_pwd = fs->pwd;
 	fs->pwd = *path;
 	path_get(path);
-	write_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 
 	if (old_pwd.dentry)
 		path_put(&old_pwd);
@@ -51,7 +51,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
 		task_lock(p);
 		fs = p->fs;
 		if (fs) {
-			write_lock(&fs->lock);
+			spin_lock(&fs->lock);
 			if (fs->root.dentry == old_root->dentry
 			    && fs->root.mnt == old_root->mnt) {
 				path_get(new_root);
@@ -64,7 +64,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
 				fs->pwd = *new_root;
 				count++;
 			}
-			write_unlock(&fs->lock);
+			spin_unlock(&fs->lock);
 		}
 		task_unlock(p);
 	} while_each_thread(g, p);
@@ -87,10 +87,10 @@ void exit_fs(struct task_struct *tsk)
 	if (fs) {
 		int kill;
 		task_lock(tsk);
-		write_lock(&fs->lock);
+		spin_lock(&fs->lock);
 		tsk->fs = NULL;
 		kill = !--fs->users;
-		write_unlock(&fs->lock);
+		spin_unlock(&fs->lock);
 		task_unlock(tsk);
 		if (kill)
 			free_fs_struct(fs);
@@ -104,7 +104,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
 	if (fs) {
 		fs->users = 1;
 		fs->in_exec = 0;
-		rwlock_init(&fs->lock);
+		spin_lock_init(&fs->lock);
 		fs->umask = old->umask;
 		get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
 	}
@@ -121,10 +121,10 @@ int unshare_fs_struct(void)
 		return -ENOMEM;
 
 	task_lock(current);
-	write_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	kill = !--fs->users;
 	current->fs = new_fs;
-	write_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 	task_unlock(current);
 
 	if (kill)
@@ -143,7 +143,7 @@ EXPORT_SYMBOL(current_umask);
 /* to be mentioned only in INIT_TASK */
 struct fs_struct init_fs = {
 	.users		= 1,
-	.lock		= __RW_LOCK_UNLOCKED(init_fs.lock),
+	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
 	.umask		= 0022,
 };
 
@@ -156,14 +156,14 @@ void daemonize_fs_struct(void)
 
 		task_lock(current);
 
-		write_lock(&init_fs.lock);
+		spin_lock(&init_fs.lock);
 		init_fs.users++;
-		write_unlock(&init_fs.lock);
+		spin_unlock(&init_fs.lock);
 
-		write_lock(&fs->lock);
+		spin_lock(&fs->lock);
 		current->fs = &init_fs;
 		kill = !--fs->users;
-		write_unlock(&fs->lock);
+		spin_unlock(&fs->lock);
 
 		task_unlock(current);
 		if (kill)