author:    Nick Piggin <npiggin@suse.de>	2009-04-26 06:25:54 -0400
committer: Al Viro <viro@zeniv.linux.org.uk>	2009-06-11 21:36:02 -0400
commit:    d3ef3d7351ccfbef3e5d926efc5ee332136f40d4
tree:      bd875a2b267ae03b350e259675ccb1a04453b9b9 /include/linux/mount.h
parent:    3174c21b74b56c6a53fddd41a30fd6f757a32bd0
fs: mnt_want_write speedup
This patch speeds up lmbench lat_mmap test by about 8%. lat_mmap is set up
basically to mmap a 64MB file on tmpfs, fault in its pages, then unmap it.
A microbenchmark, yes, but it exercises some important paths in the mm.
Before:
avg = 501.9
std = 14.7773
After:
avg = 462.286
std = 5.46106
(50 runs of each, stddev gives a reasonable confidence, but there is quite
a bit of variation there still)
It does this by removing the complex per-cpu locking and counter-cache and
replacing it with a percpu counter in struct vfsmount. This makes the code
much simpler, and avoids spinlocks (although the msync is still pretty
costly, unfortunately). It results in about 900 bytes smaller code too. It
does increase the size of a vfsmount, however.
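For illustration, a minimal sketch of what the per-CPU counting amounts to.
The helper names (inc_mnt_writers, count_mnt_writers) follow the companion
fs/namespace.c changes rather than the header diff below, so treat them as
assumed context, not part of this patch's interface:

#ifdef CONFIG_SMP
/*
 * Fast path: each CPU bumps only its own slot (caller has preemption
 * disabled), so there is no shared cache line and no atomic RMW.
 */
static inline void inc_mnt_writers(struct vfsmount *mnt)
{
	(*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))++;
}

/*
 * Slow path (e.g. remount read-only): the total is only meaningful once
 * new writers have been held off, so the slots are summed then.
 */
static unsigned int count_mnt_writers(struct vfsmount *mnt)
{
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(mnt->mnt_writers, cpu);
	return count;
}
#endif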
It should also give a speedup on large systems if CPUs are frequently operating
on different mounts (because the existing scheme has to operate on an atomic in
the struct vfsmount when switching between mounts). But I'm most interested in
the single-threaded path performance for the moment.
[AV: minor cleanup]
Cc: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'include/linux/mount.h')
-rw-r--r--  include/linux/mount.h | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 51f55f903aff..ac49c1f8e5c0 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -30,7 +30,7 @@ struct mnt_namespace;
 #define MNT_STRICTATIME 0x80
 
 #define MNT_SHRINKABLE 0x100
-#define MNT_IMBALANCED_WRITE_COUNT 0x200 /* just for debugging */
+#define MNT_WRITE_HOLD 0x200
 
 #define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
 #define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
@@ -65,13 +65,22 @@ struct vfsmount {
 	int mnt_expiry_mark;		/* true if marked for expiry */
 	int mnt_pinned;
 	int mnt_ghosts;
-	/*
-	 * This value is not stable unless all of the mnt_writers[] spinlocks
-	 * are held, and all mnt_writer[]s on this mount have 0 as their ->count
-	 */
-	atomic_t __mnt_writers;
+#ifdef CONFIG_SMP
+	int *mnt_writers;
+#else
+	int mnt_writers;
+#endif
 };
 
+static inline int *get_mnt_writers_ptr(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+	return mnt->mnt_writers;
+#else
+	return &mnt->mnt_writers;
+#endif
+}
+
 static inline struct vfsmount *mntget(struct vfsmount *mnt)
 {
 	if (mnt)
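For context, here is a condensed, illustrative version of the mnt_want_write()
fast path that the new MNT_WRITE_HOLD flag and the per-CPU count make possible.
It is simplified from the companion fs/namespace.c changes, which are not shown
in this diff; the helpers inc_mnt_writers()/dec_mnt_writers() are assumed from
there, so take the details as a sketch rather than the exact implementation:

int mnt_want_write(struct vfsmount *mnt)
{
	int ret = 0;

	preempt_disable();
	inc_mnt_writers(mnt);	/* CPU-local increment, no lock taken */
	/*
	 * Make the increment visible before sampling MNT_WRITE_HOLD, so a
	 * concurrent remount-read-only cannot miss this writer.
	 */
	smp_mb();
	while (mnt->mnt_flags & MNT_WRITE_HOLD)
		cpu_relax();	/* remount-ro is summing the per-CPU counts */
	smp_rmb();		/* don't read the ro state until HOLD clears */
	if (__mnt_is_readonly(mnt)) {
		dec_mnt_writers(mnt);	/* lost the race: mount went ro */
		ret = -EROFS;
	}
	preempt_enable();
	return ret;
}

The spinlock-free structure is the point of the patch: on the fast path the
only cross-CPU traffic is the read of mnt_flags, and writers on different
mounts never touch a common cache line.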