about summary refs log tree commit diff stats
path: root/fs/dcache.c
diff options
context:
space:
mode:
authorNick Piggin <npiggin@kernel.dk>2011-01-07 01:49:18 -0500
committerNick Piggin <npiggin@kernel.dk>2011-01-07 01:50:17 -0500
commit86c8749ede0c59e590de9267066932a26f1ce796 (patch)
tree316517d7c03d9caf0577acc517532ed2bc1801cc /fs/dcache.c
parentccd35fb9f4da856b105ea0f1e0cab3702e8ae6ba (diff)
vfs: revert per-cpu nr_unused counters for dentry and inodes
The nr_unused counters count the number of objects on an LRU, and as such they
are synchronized with LRU object insertion and removal and scanning, and
protected under the LRU lock.

Making it per-cpu does not actually get any concurrency improvements because
of this lock, and summing the counter is much slower, and
incrementing/decrementing it costs more code size and is slower too.

These counters should stay per-LRU, which currently means global.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c | 16
1 files changed, 5 insertions, 11 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 9d1a59dfda0b..f62ba90bce91 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -68,14 +68,12 @@ struct dentry_stat_t dentry_stat = {
68}; 68};
69 69
70static struct percpu_counter nr_dentry __cacheline_aligned_in_smp; 70static struct percpu_counter nr_dentry __cacheline_aligned_in_smp;
71static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp;
72 71
73#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) 72#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
74int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, 73int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
75 size_t *lenp, loff_t *ppos) 74 size_t *lenp, loff_t *ppos)
76{ 75{
77 dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry); 76 dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry);
78 dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
79 return proc_dointvec(table, write, buffer, lenp, ppos); 77 return proc_dointvec(table, write, buffer, lenp, ppos);
80} 78}
81#endif 79#endif
@@ -140,7 +138,7 @@ static void dentry_lru_add(struct dentry *dentry)
140 if (list_empty(&dentry->d_lru)) { 138 if (list_empty(&dentry->d_lru)) {
141 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); 139 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
142 dentry->d_sb->s_nr_dentry_unused++; 140 dentry->d_sb->s_nr_dentry_unused++;
143 percpu_counter_inc(&nr_dentry_unused); 141 dentry_stat.nr_unused++;
144 } 142 }
145} 143}
146 144
@@ -149,7 +147,7 @@ static void dentry_lru_del(struct dentry *dentry)
149 if (!list_empty(&dentry->d_lru)) { 147 if (!list_empty(&dentry->d_lru)) {
150 list_del_init(&dentry->d_lru); 148 list_del_init(&dentry->d_lru);
151 dentry->d_sb->s_nr_dentry_unused--; 149 dentry->d_sb->s_nr_dentry_unused--;
152 percpu_counter_dec(&nr_dentry_unused); 150 dentry_stat.nr_unused--;
153 } 151 }
154} 152}
155 153
@@ -158,7 +156,7 @@ static void dentry_lru_move_tail(struct dentry *dentry)
158 if (list_empty(&dentry->d_lru)) { 156 if (list_empty(&dentry->d_lru)) {
159 list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); 157 list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
160 dentry->d_sb->s_nr_dentry_unused++; 158 dentry->d_sb->s_nr_dentry_unused++;
161 percpu_counter_inc(&nr_dentry_unused); 159 dentry_stat.nr_unused++;
162 } else { 160 } else {
163 list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); 161 list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
164 } 162 }
@@ -546,7 +544,7 @@ static void prune_dcache(int count)
546{ 544{
547 struct super_block *sb, *p = NULL; 545 struct super_block *sb, *p = NULL;
548 int w_count; 546 int w_count;
549 int unused = percpu_counter_sum_positive(&nr_dentry_unused); 547 int unused = dentry_stat.nr_unused;
550 int prune_ratio; 548 int prune_ratio;
551 int pruned; 549 int pruned;
552 550
@@ -908,16 +906,13 @@ EXPORT_SYMBOL(shrink_dcache_parent);
908 */ 906 */
909static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 907static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
910{ 908{
911 int nr_unused;
912
913 if (nr) { 909 if (nr) {
914 if (!(gfp_mask & __GFP_FS)) 910 if (!(gfp_mask & __GFP_FS))
915 return -1; 911 return -1;
916 prune_dcache(nr); 912 prune_dcache(nr);
917 } 913 }
918 914
919 nr_unused = percpu_counter_sum_positive(&nr_dentry_unused); 915 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
920 return (nr_unused / 100) * sysctl_vfs_cache_pressure;
921} 916}
922 917
923static struct shrinker dcache_shrinker = { 918static struct shrinker dcache_shrinker = {
@@ -2424,7 +2419,6 @@ static void __init dcache_init(void)
2424 int loop; 2419 int loop;
2425 2420
2426 percpu_counter_init(&nr_dentry, 0); 2421 percpu_counter_init(&nr_dentry, 0);
2427 percpu_counter_init(&nr_dentry_unused, 0);
2428 2422
2429 /* 2423 /*
2430 * A constructor could be added for stable state like the lists, 2424 * A constructor could be added for stable state like the lists,