author		Nick Piggin <npiggin@kernel.dk>	2011-01-07 01:49:18 -0500
committer	Nick Piggin <npiggin@kernel.dk>	2011-01-07 01:50:17 -0500
commit		86c8749ede0c59e590de9267066932a26f1ce796
tree		316517d7c03d9caf0577acc517532ed2bc1801cc /fs/inode.c
parent		ccd35fb9f4da856b105ea0f1e0cab3702e8ae6ba
vfs: revert per-cpu nr_unused counters for dentry and inodes

The nr_unused counters count the number of objects on an LRU. As such,
they are synchronized with LRU object insertion, removal, and scanning,
and are protected under the LRU lock. Making them per-cpu gains no
concurrency, because every update still takes that lock, while summing
the counter becomes much slower, and incrementing/decrementing it costs
more code size and is slower too. These counters should stay per-LRU,
which currently means global.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
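To make the trade-off concrete, here is the shape of the change, condensed
from the hunks below. This is a fragment for illustration only (the LRU
lock context is implied and `n` is just a local), not compilable code:

	/* Before: per-cpu counter. The update still happens under the
	 * LRU lock, and reading the total must sum every CPU's slot. */
	percpu_counter_inc(&nr_inodes_unused);			/* update */
	n = percpu_counter_sum_positive(&nr_inodes_unused);	/* O(nr_cpus) read */

	/* After: a plain integer serialized by the same LRU lock.
	 * Update and read are each a single memory access. */
	inodes_stat.nr_unused++;				/* update */
	n = inodes_stat.nr_unused;				/* cheap read */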
Diffstat (limited to 'fs/inode.c')
-rw-r--r--	fs/inode.c	17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index ae2727ab0c3a..efc43979709f 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -103,7 +103,6 @@ static DECLARE_RWSEM(iprune_sem);
 struct inodes_stat_t inodes_stat;
 
 static struct percpu_counter nr_inodes __cacheline_aligned_in_smp;
-static struct percpu_counter nr_inodes_unused __cacheline_aligned_in_smp;
 
 static struct kmem_cache *inode_cachep __read_mostly;
 
@@ -114,7 +113,7 @@ static inline int get_nr_inodes(void)
 
 static inline int get_nr_inodes_unused(void)
 {
-	return percpu_counter_sum_positive(&nr_inodes_unused);
+	return inodes_stat.nr_unused;
 }
 
 int get_nr_dirty_inodes(void)
@@ -132,7 +131,6 @@ int proc_nr_inodes(ctl_table *table, int write,
 		   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	inodes_stat.nr_inodes = get_nr_inodes();
-	inodes_stat.nr_unused = get_nr_inodes_unused();
 	return proc_dointvec(table, write, buffer, lenp, ppos);
 }
 #endif
@@ -335,7 +333,7 @@ static void inode_lru_list_add(struct inode *inode)
 {
 	if (list_empty(&inode->i_lru)) {
 		list_add(&inode->i_lru, &inode_lru);
-		percpu_counter_inc(&nr_inodes_unused);
+		inodes_stat.nr_unused++;
 	}
 }
 
@@ -343,7 +341,7 @@ static void inode_lru_list_del(struct inode *inode)
 {
 	if (!list_empty(&inode->i_lru)) {
 		list_del_init(&inode->i_lru);
-		percpu_counter_dec(&nr_inodes_unused);
+		inodes_stat.nr_unused--;
 	}
 }
 
@@ -513,7 +511,7 @@ void evict_inodes(struct super_block *sb)
 		list_move(&inode->i_lru, &dispose);
 		list_del_init(&inode->i_wb_list);
 		if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-			percpu_counter_dec(&nr_inodes_unused);
+			inodes_stat.nr_unused--;
 	}
 	spin_unlock(&inode_lock);
 
@@ -554,7 +552,7 @@ int invalidate_inodes(struct super_block *sb)
 		list_move(&inode->i_lru, &dispose);
 		list_del_init(&inode->i_wb_list);
 		if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-			percpu_counter_dec(&nr_inodes_unused);
+			inodes_stat.nr_unused--;
 	}
 	spin_unlock(&inode_lock);
 
@@ -616,7 +614,7 @@ static void prune_icache(int nr_to_scan)
 		if (atomic_read(&inode->i_count) ||
 		    (inode->i_state & ~I_REFERENCED)) {
 			list_del_init(&inode->i_lru);
-			percpu_counter_dec(&nr_inodes_unused);
+			inodes_stat.nr_unused--;
 			continue;
 		}
 
@@ -650,7 +648,7 @@ static void prune_icache(int nr_to_scan)
 		 */
 		list_move(&inode->i_lru, &freeable);
 		list_del_init(&inode->i_wb_list);
-		percpu_counter_dec(&nr_inodes_unused);
+		inodes_stat.nr_unused--;
 	}
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_INODESTEAL, reap);
@@ -1649,7 +1647,6 @@ void __init inode_init(void)
 					 init_once);
 	register_shrinker(&icache_shrinker);
 	percpu_counter_init(&nr_inodes, 0);
-	percpu_counter_init(&nr_inodes_unused, 0);
 
 	/* Hash may have been set up in inode_init_early */
 	if (!hashdist)
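For readers outside the kernel, a user-space analogue of the argument may
help. This is a sketch, not kernel code: it assumes POSIX threads, and
names like lru_lock and shard[] are invented for the demo, not kernel
symbols. Both counters are only ever updated with the lock held,
mirroring the LRU counters in this patch, so sharding buys no concurrency
while making the read side a sum instead of a single load.

	/* analogue.c -- build with: cc -std=c99 -pthread analogue.c */
	#include <pthread.h>
	#include <stdio.h>

	#define NTHREADS 4
	#define ITERS 100000

	static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
	static long nr_unused;		/* plain counter (the restored form) */
	static long shard[NTHREADS];	/* per-thread shards (the reverted form) */

	static void *worker(void *arg)
	{
		long id = (long)arg;

		for (int i = 0; i < ITERS; i++) {
			pthread_mutex_lock(&lru_lock);	/* taken anyway for the list */
			nr_unused++;			/* free once the lock is held */
			shard[id]++;			/* no cheaper: same lock */
			pthread_mutex_unlock(&lru_lock);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t[NTHREADS];
		long sum = 0;

		for (long i = 0; i < NTHREADS; i++)
			pthread_create(&t[i], NULL, worker, (void *)i);
		for (int i = 0; i < NTHREADS; i++)
			pthread_join(t[i], NULL);

		/* Reading the sharded counter must walk every shard ... */
		for (int i = 0; i < NTHREADS; i++)
			sum += shard[i];

		/* ... while the plain counter is a single load. */
		printf("plain=%ld sharded-sum=%ld\n", nr_unused, sum);
		return 0;
	}

Both counters end at 400000; the only difference is the cost of reading
them, which is the point of the revert.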