author	Nick Piggin <npiggin@kernel.dk>	2011-01-07 01:49:53 -0500
committer	Nick Piggin <npiggin@kernel.dk>	2011-01-07 01:50:27 -0500
commit	c28cc36469554dc55540f059fbdc7fa22a2c31fc (patch)
tree	6b867456be48b8633a2d56a99e00bb3faf9dccc7 /fs/namei.c
parent	31e6b01f4183ff419a6d1f86177cbf4662347cec (diff)
fs: fs_struct use seqlock
Use a seqlock in the fs_struct to enable us to take an atomic copy of the
complete cwd and root paths. Use this in the RCU lookup path to avoid a
thread-shared spinlock in RCU lookup operations.

Multi-threaded apps may now perform path lookups with scalability matching
multi-process apps. Operations such as stat(2) become very scalable for
multi-threaded workload.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
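Not part of the commit: the retry loop this patch adds in fs/namei.c (shown in the diff below) is the standard seqcount reader pattern, i.e. copy the shared data, then retry if a writer raced with the copy. As a rough, hedged illustration of that pattern outside the kernel, here is a self-contained userspace sketch using C11 atomics. fs_struct_sketch, read_begin, read_retry, snapshot and set_pwd are made-up names standing in for the kernel's fs_struct, read_seqcount_begin(), read_seqcount_retry() and the write side; the memory ordering is simplified relative to the real seqcount implementation.

/*
 * Userspace sketch of the seqcount snapshot pattern: readers copy root/pwd
 * without a lock and retry if the sequence counter changed underneath them.
 */
#include <stdatomic.h>
#include <stdio.h>

struct path { const char *name; };

struct fs_struct_sketch {
	atomic_uint seq;	/* even = stable, odd = write in progress */
	struct path root;
	struct path pwd;
};

static unsigned read_begin(struct fs_struct_sketch *fs)
{
	unsigned s;
	/* spin while a writer holds the counter at an odd value */
	while ((s = atomic_load_explicit(&fs->seq, memory_order_acquire)) & 1)
		;
	return s;
}

static int read_retry(struct fs_struct_sketch *fs, unsigned s)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&fs->seq, memory_order_relaxed) != s;
}

/* reader: take an atomic snapshot of root and pwd without any lock */
static void snapshot(struct fs_struct_sketch *fs, struct path *root, struct path *pwd)
{
	unsigned s;
	do {
		s = read_begin(fs);
		*root = fs->root;
		*pwd = fs->pwd;
	} while (read_retry(fs, s));
}

/* writer: in the kernel this stays serialized by fs->lock; here we only
 * model the seq bump that lets lockless readers detect the update */
static void set_pwd(struct fs_struct_sketch *fs, struct path new_pwd)
{
	atomic_fetch_add_explicit(&fs->seq, 1, memory_order_relaxed);	/* -> odd */
	atomic_thread_fence(memory_order_release);
	fs->pwd = new_pwd;
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&fs->seq, 1, memory_order_relaxed);	/* -> even */
}

int main(void)
{
	struct fs_struct_sketch fs = { 0, { "/" }, { "/home" } };
	struct path root, pwd;

	set_pwd(&fs, (struct path){ "/tmp" });
	snapshot(&fs, &root, &pwd);
	printf("root=%s pwd=%s\n", root.name, pwd.name);
	return 0;
}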
Diffstat (limited to 'fs/namei.c')
-rw-r--r--	fs/namei.c	34
1 files changed, 21 insertions, 13 deletions
diff --git a/fs/namei.c b/fs/namei.c
index 8d3f15b3a541..c731b50a6184 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -684,9 +684,12 @@ static __always_inline void set_root_rcu(struct nameidata *nd)
 {
 	if (!nd->root.mnt) {
 		struct fs_struct *fs = current->fs;
-		spin_lock(&fs->lock);
-		nd->root = fs->root;
-		spin_unlock(&fs->lock);
+		unsigned seq;
+
+		do {
+			seq = read_seqcount_begin(&fs->seq);
+			nd->root = fs->root;
+		} while (read_seqcount_retry(&fs->seq, seq));
 	}
 }
 
@@ -1369,26 +1372,31 @@ static int path_init_rcu(int dfd, const char *name, unsigned int flags, struct n
 
 	if (*name=='/') {
 		struct fs_struct *fs = current->fs;
+		unsigned seq;
 
 		br_read_lock(vfsmount_lock);
 		rcu_read_lock();
 
-		spin_lock(&fs->lock);
-		nd->root = fs->root;
-		nd->path = nd->root;
-		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
-		spin_unlock(&fs->lock);
+		do {
+			seq = read_seqcount_begin(&fs->seq);
+			nd->root = fs->root;
+			nd->path = nd->root;
+			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+		} while (read_seqcount_retry(&fs->seq, seq));
 
 	} else if (dfd == AT_FDCWD) {
 		struct fs_struct *fs = current->fs;
+		unsigned seq;
 
 		br_read_lock(vfsmount_lock);
 		rcu_read_lock();
 
-		spin_lock(&fs->lock);
-		nd->path = fs->pwd;
-		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
-		spin_unlock(&fs->lock);
+		do {
+			seq = read_seqcount_begin(&fs->seq);
+			nd->path = fs->pwd;
+			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+		} while (read_seqcount_retry(&fs->seq, seq));
+
 	} else {
 		struct dentry *dentry;
 
@@ -1411,7 +1419,7 @@ static int path_init_rcu(int dfd, const char *name, unsigned int flags, struct n
 		if (fput_needed)
 			nd->file = file;
 
-		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+		nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
 		br_read_lock(vfsmount_lock);
 		rcu_read_lock();
 	}
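The diffstat above is limited to fs/namei.c, so only the reader side appears in this diff. For orientation, here is a hedged sketch (illustrative, not an actual hunk from this series) of what the matching write side in fs/fs_struct.c looks like under this scheme: writers keep taking the existing fs->lock spinlock for mutual exclusion against each other, and additionally wrap the path update in write_seqcount_begin()/write_seqcount_end() on fs->seq so that the lockless readers above can detect a concurrent update and retry.

/* Sketch only: writers stay serialized by fs->lock; the seqcount write
 * section is what allows the RCU-walk readers in fs/namei.c to snapshot
 * fs->root/fs->pwd without taking that lock. */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}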