diff options
author | Nick Piggin <npiggin@kernel.dk> | 2010-08-17 14:37:39 -0400 |
---|---|---|
committer | Al Viro <viro@zeniv.linux.org.uk> | 2010-08-18 08:35:48 -0400 |
commit | 99b7db7b8ffd6bb755eb0a175596421a0b581cb2 (patch) | |
tree | cbaf57d252f0852f967d3fd5a5f87472964a01fe /fs/dcache.c | |
parent | 6416ccb7899960868f5016751fb81bf25213d24f (diff) |
fs: brlock vfsmount_lock
fs: brlock vfsmount_lock
Use a brlock for the vfsmount lock. It must be taken for write whenever
modifying the mount hash or associated fields, and may be taken for read when
performing mount hash lookups.
A new lock is added for the mnt-id allocator, so it doesn't need to take
the heavy vfsmount write-lock.
The number of atomics should remain the same for fastpath rlock cases, though
code would be slightly slower due to per-cpu access. Scalability is not
much improved in common cases yet, due to other locks (ie. dcache_lock) getting
in the way. However path lookups crossing mountpoints should be one case where
scalability is improved (currently requiring the global lock).
The slowpath is slower due to use of brlock. On a 64 core, 64 socket, 32 node
Altix system (high latency to remote nodes), a simple umount microbenchmark
(mount --bind mnt mnt2 ; umount mnt2 loop 1000 times), before this patch it
took 6.8s, afterwards took 7.1s, about 5% slower.
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Nick Piggin <npiggin@kernel.dk>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/dcache.c')
-rw-r--r-- | fs/dcache.c | 11 |
1 files changed, 6 insertions, 5 deletions
diff --git a/fs/dcache.c b/fs/dcache.c index d56a40b5a577..83293be48149 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -1935,7 +1935,7 @@ static int prepend_path(const struct path *path, struct path *root, | |||
1935 | bool slash = false; | 1935 | bool slash = false; |
1936 | int error = 0; | 1936 | int error = 0; |
1937 | 1937 | ||
1938 | spin_lock(&vfsmount_lock); | 1938 | br_read_lock(vfsmount_lock); |
1939 | while (dentry != root->dentry || vfsmnt != root->mnt) { | 1939 | while (dentry != root->dentry || vfsmnt != root->mnt) { |
1940 | struct dentry * parent; | 1940 | struct dentry * parent; |
1941 | 1941 | ||
@@ -1964,7 +1964,7 @@ out: | |||
1964 | if (!error && !slash) | 1964 | if (!error && !slash) |
1965 | error = prepend(buffer, buflen, "/", 1); | 1965 | error = prepend(buffer, buflen, "/", 1); |
1966 | 1966 | ||
1967 | spin_unlock(&vfsmount_lock); | 1967 | br_read_unlock(vfsmount_lock); |
1968 | return error; | 1968 | return error; |
1969 | 1969 | ||
1970 | global_root: | 1970 | global_root: |
@@ -2302,11 +2302,12 @@ int path_is_under(struct path *path1, struct path *path2) | |||
2302 | struct vfsmount *mnt = path1->mnt; | 2302 | struct vfsmount *mnt = path1->mnt; |
2303 | struct dentry *dentry = path1->dentry; | 2303 | struct dentry *dentry = path1->dentry; |
2304 | int res; | 2304 | int res; |
2305 | spin_lock(&vfsmount_lock); | 2305 | |
2306 | br_read_lock(vfsmount_lock); | ||
2306 | if (mnt != path2->mnt) { | 2307 | if (mnt != path2->mnt) { |
2307 | for (;;) { | 2308 | for (;;) { |
2308 | if (mnt->mnt_parent == mnt) { | 2309 | if (mnt->mnt_parent == mnt) { |
2309 | spin_unlock(&vfsmount_lock); | 2310 | br_read_unlock(vfsmount_lock); |
2310 | return 0; | 2311 | return 0; |
2311 | } | 2312 | } |
2312 | if (mnt->mnt_parent == path2->mnt) | 2313 | if (mnt->mnt_parent == path2->mnt) |
@@ -2316,7 +2317,7 @@ int path_is_under(struct path *path1, struct path *path2) | |||
2316 | dentry = mnt->mnt_mountpoint; | 2317 | dentry = mnt->mnt_mountpoint; |
2317 | } | 2318 | } |
2318 | res = is_subdir(dentry, path2->dentry); | 2319 | res = is_subdir(dentry, path2->dentry); |
2319 | spin_unlock(&vfsmount_lock); | 2320 | br_read_unlock(vfsmount_lock); |
2320 | return res; | 2321 | return res; |
2321 | } | 2322 | } |
2322 | EXPORT_SYMBOL(path_is_under); | 2323 | EXPORT_SYMBOL(path_is_under); |