author    Nick Piggin <npiggin@kernel.dk>    2011-01-07 01:49:18 -0500
committer Nick Piggin <npiggin@kernel.dk>    2011-01-07 01:50:17 -0500
commit    86c8749ede0c59e590de9267066932a26f1ce796 (patch)
tree      316517d7c03d9caf0577acc517532ed2bc1801cc
parent    ccd35fb9f4da856b105ea0f1e0cab3702e8ae6ba (diff)
vfs: revert per-cpu nr_unused counters for dentry and inodes
The nr_unused counters count the number of objects on an LRU. As such they
are synchronized with LRU object insertion, removal, and scanning, and are
protected under the LRU lock.

Making them per-cpu therefore buys no concurrency improvement, because of
that lock; meanwhile summing the counter becomes much slower, and
incrementing/decrementing it adds code size and is slower too.

These counters should stay per-LRU, which currently means global.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
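The locking argument above can be illustrated outside the kernel. Below is a
minimal userspace C sketch, not kernel code; the lru_lock, lru_head, nr_unused,
lru_add, and lru_count names are made up for illustration. Because the counter
is only ever modified under the mutex that already serializes the list, the
increment costs nothing extra, and reading it is O(1); a per-cpu counter would
have to sum one slot per possible CPU to produce the same value.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *lru_head;	/* the LRU list itself */
static long nr_unused;		/* plain counter, protected by lru_lock */

static void lru_add(struct node *n)
{
	pthread_mutex_lock(&lru_lock);	/* taken for the list anyway */
	n->next = lru_head;
	lru_head = n;
	nr_unused++;			/* no extra synchronization cost */
	pthread_mutex_unlock(&lru_lock);
}

static long lru_count(void)
{
	long n;

	pthread_mutex_lock(&lru_lock);
	n = nr_unused;			/* O(1), unlike summing per-cpu slots */
	pthread_mutex_unlock(&lru_lock);
	return n;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		lru_add(malloc(sizeof(struct node)));
	printf("nr_unused = %ld\n", lru_count());
	return 0;
}

This is the situation the patch restores: dentry_stat.nr_unused and
inodes_stat.nr_unused are only touched under the locks that already cover
the corresponding LRU operations.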
-rw-r--r--	fs/dcache.c	16
-rw-r--r--	fs/inode.c	17
2 files changed, 12 insertions, 21 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 9d1a59dfda0b..f62ba90bce91 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -68,14 +68,12 @@ struct dentry_stat_t dentry_stat = {
 };
 
 static struct percpu_counter nr_dentry __cacheline_aligned_in_smp;
-static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp;
 
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
 		   size_t *lenp, loff_t *ppos)
 {
 	dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry);
-	dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
 	return proc_dointvec(table, write, buffer, lenp, ppos);
 }
 #endif
@@ -140,7 +138,7 @@ static void dentry_lru_add(struct dentry *dentry)
 	if (list_empty(&dentry->d_lru)) {
 		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 		dentry->d_sb->s_nr_dentry_unused++;
-		percpu_counter_inc(&nr_dentry_unused);
+		dentry_stat.nr_unused++;
 	}
 }
 
@@ -149,7 +147,7 @@ static void dentry_lru_del(struct dentry *dentry)
 	if (!list_empty(&dentry->d_lru)) {
 		list_del_init(&dentry->d_lru);
 		dentry->d_sb->s_nr_dentry_unused--;
-		percpu_counter_dec(&nr_dentry_unused);
+		dentry_stat.nr_unused--;
 	}
 }
 
@@ -158,7 +156,7 @@ static void dentry_lru_move_tail(struct dentry *dentry)
 	if (list_empty(&dentry->d_lru)) {
 		list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 		dentry->d_sb->s_nr_dentry_unused++;
-		percpu_counter_inc(&nr_dentry_unused);
+		dentry_stat.nr_unused++;
 	} else {
 		list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 	}
@@ -546,7 +544,7 @@ static void prune_dcache(int count)
 {
 	struct super_block *sb, *p = NULL;
 	int w_count;
-	int unused = percpu_counter_sum_positive(&nr_dentry_unused);
+	int unused = dentry_stat.nr_unused;
 	int prune_ratio;
 	int pruned;
 
@@ -908,16 +906,13 @@ EXPORT_SYMBOL(shrink_dcache_parent);
  */
 static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
-	int nr_unused;
-
 	if (nr) {
 		if (!(gfp_mask & __GFP_FS))
 			return -1;
 		prune_dcache(nr);
 	}
 
-	nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
-	return (nr_unused / 100) * sysctl_vfs_cache_pressure;
+	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
 static struct shrinker dcache_shrinker = {
@@ -2424,7 +2419,6 @@ static void __init dcache_init(void)
 	int loop;
 
 	percpu_counter_init(&nr_dentry, 0);
-	percpu_counter_init(&nr_dentry_unused, 0);
 
 	/*
 	 * A constructor could be added for stable state like the lists,
diff --git a/fs/inode.c b/fs/inode.c
index ae2727ab0c3a..efc43979709f 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -103,7 +103,6 @@ static DECLARE_RWSEM(iprune_sem);
 struct inodes_stat_t inodes_stat;
 
 static struct percpu_counter nr_inodes __cacheline_aligned_in_smp;
-static struct percpu_counter nr_inodes_unused __cacheline_aligned_in_smp;
 
 static struct kmem_cache *inode_cachep __read_mostly;
 
@@ -114,7 +113,7 @@ static inline int get_nr_inodes(void)
 
 static inline int get_nr_inodes_unused(void)
 {
-	return percpu_counter_sum_positive(&nr_inodes_unused);
+	return inodes_stat.nr_unused;
 }
 
 int get_nr_dirty_inodes(void)
@@ -132,7 +131,6 @@ int proc_nr_inodes(ctl_table *table, int write,
 		   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	inodes_stat.nr_inodes = get_nr_inodes();
-	inodes_stat.nr_unused = get_nr_inodes_unused();
 	return proc_dointvec(table, write, buffer, lenp, ppos);
 }
 #endif
@@ -335,7 +333,7 @@ static void inode_lru_list_add(struct inode *inode)
 {
 	if (list_empty(&inode->i_lru)) {
 		list_add(&inode->i_lru, &inode_lru);
-		percpu_counter_inc(&nr_inodes_unused);
+		inodes_stat.nr_unused++;
 	}
 }
 
@@ -343,7 +341,7 @@ static void inode_lru_list_del(struct inode *inode)
 {
 	if (!list_empty(&inode->i_lru)) {
 		list_del_init(&inode->i_lru);
-		percpu_counter_dec(&nr_inodes_unused);
+		inodes_stat.nr_unused--;
 	}
 }
 
@@ -513,7 +511,7 @@ void evict_inodes(struct super_block *sb)
 		list_move(&inode->i_lru, &dispose);
 		list_del_init(&inode->i_wb_list);
 		if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-			percpu_counter_dec(&nr_inodes_unused);
+			inodes_stat.nr_unused--;
 	}
 	spin_unlock(&inode_lock);
 
@@ -554,7 +552,7 @@ int invalidate_inodes(struct super_block *sb)
 		list_move(&inode->i_lru, &dispose);
 		list_del_init(&inode->i_wb_list);
 		if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-			percpu_counter_dec(&nr_inodes_unused);
+			inodes_stat.nr_unused--;
 	}
 	spin_unlock(&inode_lock);
 
@@ -616,7 +614,7 @@ static void prune_icache(int nr_to_scan)
 		if (atomic_read(&inode->i_count) ||
 		    (inode->i_state & ~I_REFERENCED)) {
 			list_del_init(&inode->i_lru);
-			percpu_counter_dec(&nr_inodes_unused);
+			inodes_stat.nr_unused--;
 			continue;
 		}
 
@@ -650,7 +648,7 @@
 		 */
 		list_move(&inode->i_lru, &freeable);
 		list_del_init(&inode->i_wb_list);
-		percpu_counter_dec(&nr_inodes_unused);
+		inodes_stat.nr_unused--;
 	}
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_INODESTEAL, reap);
@@ -1649,7 +1647,6 @@ void __init inode_init(void)
 			init_once);
 	register_shrinker(&icache_shrinker);
 	percpu_counter_init(&nr_inodes, 0);
-	percpu_counter_init(&nr_inodes_unused, 0);
 
 	/* Hash may have been set up in inode_init_early */
 	if (!hashdist)
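For reference, the counters kept here are what the proc_nr_dentry() and
proc_nr_inodes() sysctl handlers above export to userspace. A small example
reader, assuming the standard /proc/sys/fs/dentry-state layout on Linux,
whose first two fields are dentry_stat.nr_dentry and dentry_stat.nr_unused:

#include <stdio.h>

int main(void)
{
	long nr_dentry, nr_unused;
	FILE *f = fopen("/proc/sys/fs/dentry-state", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* first two fields: dentry_stat.nr_dentry, dentry_stat.nr_unused */
	if (fscanf(f, "%ld %ld", &nr_dentry, &nr_unused) == 2)
		printf("dentries: %ld total, %ld unused\n",
		       nr_dentry, nr_unused);
	fclose(f);
	return 0;
}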