path: root/include
author		Nick Piggin <npiggin@kernel.dk>		2010-08-17 14:37:38 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2010-08-18 08:35:48 -0400
commit		6416ccb7899960868f5016751fb81bf25213d24f (patch)
tree		457069571211e4ece844dc332a2f9673705a5bde /include
parent		2dc91abe03d8ce6dd7f9251faffafca5f6b9e85d (diff)
fs: scale files_lock
Improve scalability of files_lock by adding per-cpu, per-sb files lists,
protected with an lglock. The lglock provides fast access to the per-cpu
lists to add and remove files. It also provides a snapshot of all the
per-cpu lists (although this is very slow).

One difficulty with this approach is that a file can be removed from the
list by another CPU. We must track which per-cpu list the file is on with
a new variable in the file struct (packed into a hole on 64-bit archs).
Scalability could suffer if files are frequently removed from a different
CPU's list. However, loads with frequent removal of files imply a short
interval between adding and removing the files, and the scheduler attempts
to avoid moving processes too far away. Also, even in the case of
cross-CPU removal, the hardware has much more opportunity to parallelise
cacheline transfers with N cachelines than with 1.

A worst-case test of 1 CPU allocating files that are subsequently freed by
N CPUs degenerates to contending on a single lock, which is no worse than
before. When more than one CPU is allocating files, even if they are
always freed by different CPUs, there will be more parallelism than the
single-lock case.

Testing results:

On a 2-socket, 8-core Opteron, I measure the number of times the lock is
taken to remove the file, the number of times it is removed by the same
CPU that added it, and the number of times it is removed by the same node
that added it.

Booting:    locks=  25049  cpu-hits=  23174 (92.5%)  node-hits=  23945 (95.6%)
kbuild -j16 locks=2281913  cpu-hits=2208126 (96.8%)  node-hits=2252674 (98.7%)
dbench 64   locks=4306582  cpu-hits=4287247 (99.6%)  node-hits=4299527 (99.8%)

So a file is removed by the same CPU that added it over 90% of the time.
It remains within the same node 95% of the time.

Tim Chen ran some numbers for a 64-thread Nehalem system performing a
compile.

                throughput
2.6.34-rc2      24.5
+patch          24.9

                us      sys     idle    IO wait  (in %)
2.6.34-rc2      51.25   28.25   17.25   3.25
+patch          53.75   18.5    19      8.75

So significantly less CPU time is spent in kernel code, with higher idle
time and slightly higher throughput. The single-threaded performance
difference was within the noise of microbenchmarks. That is not to say no
penalty exists: the code is larger and more memory accesses are required,
so it will be slightly slower.

Cc: linux-kernel@vger.kernel.org
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Nick Piggin <npiggin@kernel.dk>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
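The per-cpu list handling itself lives in fs/file_table.c, which is outside
this include-only diff. As a rough, illustrative sketch of how add/remove
could be built on the new fields (the names files_lglock, file_sb_list_add()
and file_sb_list_del() are assumptions based on the commit message, and the
lglock calls follow the by-name macro API added by the parent lglock commit):

#include <linux/fs.h>
#include <linux/lglock.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Sketch only -- not the exact patched code in fs/file_table.c. */
DECLARE_LGLOCK(files_lglock);
DEFINE_LGLOCK(files_lglock);

/* Put a newly opened file on its superblock's list for the local CPU. */
static void file_sb_list_add(struct file *file, struct super_block *sb)
{
	lg_local_lock(files_lglock);	/* only this CPU's spinlock is taken */
#ifdef CONFIG_SMP
	/* Remember which per-cpu list we are on, for removal later. */
	file->f_sb_list_cpu = smp_processor_id();
	list_add(&file->f_u.fu_list,
		 per_cpu_ptr(sb->s_files, file->f_sb_list_cpu));
#else
	list_add(&file->f_u.fu_list, &sb->s_files);
#endif
	lg_local_unlock(files_lglock);
}

/* Remove a file; it may sit on another CPU's list, so lock that CPU. */
static void file_sb_list_del(struct file *file)
{
	if (list_empty(&file->f_u.fu_list))
		return;
#ifdef CONFIG_SMP
	lg_local_lock_cpu(files_lglock, file->f_sb_list_cpu);
	list_del_init(&file->f_u.fu_list);
	lg_local_unlock_cpu(files_lglock, file->f_sb_list_cpu);
#else
	lg_local_lock(files_lglock);
	list_del_init(&file->f_u.fu_list);
	lg_local_unlock(files_lglock);
#endif
}

The f_sb_list_cpu field is what keeps removal cheap: the remover contends
only for the spinlock of the CPU the file was added on, which, per the
statistics above, is usually its own.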
Diffstat (limited to 'include')
-rw-r--r--	include/linux/fs.h	7
1 file changed, 7 insertions, 0 deletions
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5e65add0f163..76041b614758 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -920,6 +920,9 @@ struct file {
 #define f_vfsmnt	f_path.mnt
 	const struct file_operations	*f_op;
 	spinlock_t		f_lock;  /* f_ep_links, f_flags, no IRQ */
+#ifdef CONFIG_SMP
+	int			f_sb_list_cpu;
+#endif
 	atomic_long_t		f_count;
 	unsigned int		f_flags;
 	fmode_t			f_mode;
@@ -1334,7 +1337,11 @@ struct super_block {
 
 	struct list_head	s_inodes;	/* all inodes */
 	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
+#ifdef CONFIG_SMP
+	struct list_head __percpu *s_files;
+#else
 	struct list_head	s_files;
+#endif
 	/* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
 	struct list_head	s_dentry_lru;	/* unused dentry lru */
 	int			s_nr_dentry_unused;	/* # of dentry on lru */
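The "snapshot of all the per-cpu lists" mentioned in the commit message is
the slow path: every CPU's spinlock is taken and every list is walked. A
hedged sketch of such a walk, building on the definitions in the sketch
above (the function name and the remount-read-only style check are
illustrative, not taken from the patch):

/* Sketch only: walk every open file of a superblock across all CPUs. */
static int sb_has_writable_file(struct super_block *sb)
{
	struct file *file;
	int writable = 0;

	lg_global_lock(files_lglock);	/* slow: takes every CPU's spinlock */
#ifdef CONFIG_SMP
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			struct list_head *list = per_cpu_ptr(sb->s_files, cpu);

			list_for_each_entry(file, list, f_u.fu_list) {
				if (S_ISREG(file->f_path.dentry->d_inode->i_mode) &&
				    (file->f_mode & FMODE_WRITE))
					writable = 1;
			}
		}
	}
#else
	list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
		if (S_ISREG(file->f_path.dentry->d_inode->i_mode) &&
		    (file->f_mode & FMODE_WRITE))
			writable = 1;
	}
#endif
	lg_global_unlock(files_lglock);
	return writable;
}

Because this path takes one spinlock per possible CPU, it only suits rare
operations; the common open/close paths stay on the per-cpu fast path.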