diff options
author | Dave Chinner <dchinner@redhat.com> | 2011-07-08 00:14:38 -0400 |
---|---|---|
committer | Al Viro <viro@zeniv.linux.org.uk> | 2011-07-20 01:44:34 -0400 |
commit | fcb94f72d3e0f4f34b326c2986da8e5996daf72c (patch) | |
tree | a18da3a56f447264c0dc2501a41afc5890afc78c /fs | |
parent | e9299f5058595a655c3b207cda9635e28b9197e6 (diff) |
inode: convert inode_stat.nr_unused to per-cpu counters
Before we split up the inode_lru_lock, the unused inode counter
needs to be made independent of the global inode_lru_lock. Convert
it to per-cpu counters to do this.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/inode.c | 16 |
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index cbdcab88105f..9a0361121712 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -95,6 +95,7 @@ EXPORT_SYMBOL(empty_aops); | |||
95 | struct inodes_stat_t inodes_stat; | 95 | struct inodes_stat_t inodes_stat; |
96 | 96 | ||
97 | static DEFINE_PER_CPU(unsigned int, nr_inodes); | 97 | static DEFINE_PER_CPU(unsigned int, nr_inodes); |
98 | static DEFINE_PER_CPU(unsigned int, nr_unused); | ||
98 | 99 | ||
99 | static struct kmem_cache *inode_cachep __read_mostly; | 100 | static struct kmem_cache *inode_cachep __read_mostly; |
100 | 101 | ||
@@ -109,7 +110,11 @@ static int get_nr_inodes(void) | |||
109 | 110 | ||
110 | static inline int get_nr_inodes_unused(void) | 111 | static inline int get_nr_inodes_unused(void) |
111 | { | 112 | { |
112 | return inodes_stat.nr_unused; | 113 | int i; |
114 | int sum = 0; | ||
115 | for_each_possible_cpu(i) | ||
116 | sum += per_cpu(nr_unused, i); | ||
117 | return sum < 0 ? 0 : sum; | ||
113 | } | 118 | } |
114 | 119 | ||
115 | int get_nr_dirty_inodes(void) | 120 | int get_nr_dirty_inodes(void) |
@@ -127,6 +132,7 @@ int proc_nr_inodes(ctl_table *table, int write, | |||
127 | void __user *buffer, size_t *lenp, loff_t *ppos) | 132 | void __user *buffer, size_t *lenp, loff_t *ppos) |
128 | { | 133 | { |
129 | inodes_stat.nr_inodes = get_nr_inodes(); | 134 | inodes_stat.nr_inodes = get_nr_inodes(); |
135 | inodes_stat.nr_unused = get_nr_inodes_unused(); | ||
130 | return proc_dointvec(table, write, buffer, lenp, ppos); | 136 | return proc_dointvec(table, write, buffer, lenp, ppos); |
131 | } | 137 | } |
132 | #endif | 138 | #endif |
@@ -340,7 +346,7 @@ static void inode_lru_list_add(struct inode *inode) | |||
340 | spin_lock(&inode_lru_lock); | 346 | spin_lock(&inode_lru_lock); |
341 | if (list_empty(&inode->i_lru)) { | 347 | if (list_empty(&inode->i_lru)) { |
342 | list_add(&inode->i_lru, &inode_lru); | 348 | list_add(&inode->i_lru, &inode_lru); |
343 | inodes_stat.nr_unused++; | 349 | this_cpu_inc(nr_unused); |
344 | } | 350 | } |
345 | spin_unlock(&inode_lru_lock); | 351 | spin_unlock(&inode_lru_lock); |
346 | } | 352 | } |
@@ -350,7 +356,7 @@ static void inode_lru_list_del(struct inode *inode) | |||
350 | spin_lock(&inode_lru_lock); | 356 | spin_lock(&inode_lru_lock); |
351 | if (!list_empty(&inode->i_lru)) { | 357 | if (!list_empty(&inode->i_lru)) { |
352 | list_del_init(&inode->i_lru); | 358 | list_del_init(&inode->i_lru); |
353 | inodes_stat.nr_unused--; | 359 | this_cpu_dec(nr_unused); |
354 | } | 360 | } |
355 | spin_unlock(&inode_lru_lock); | 361 | spin_unlock(&inode_lru_lock); |
356 | } | 362 | } |
@@ -656,7 +662,7 @@ static void prune_icache(int nr_to_scan) | |||
656 | (inode->i_state & ~I_REFERENCED)) { | 662 | (inode->i_state & ~I_REFERENCED)) { |
657 | list_del_init(&inode->i_lru); | 663 | list_del_init(&inode->i_lru); |
658 | spin_unlock(&inode->i_lock); | 664 | spin_unlock(&inode->i_lock); |
659 | inodes_stat.nr_unused--; | 665 | this_cpu_dec(nr_unused); |
660 | continue; | 666 | continue; |
661 | } | 667 | } |
662 | 668 | ||
@@ -693,7 +699,7 @@ static void prune_icache(int nr_to_scan) | |||
693 | spin_unlock(&inode->i_lock); | 699 | spin_unlock(&inode->i_lock); |
694 | 700 | ||
695 | list_move(&inode->i_lru, &freeable); | 701 | list_move(&inode->i_lru, &freeable); |
696 | inodes_stat.nr_unused--; | 702 | this_cpu_dec(nr_unused); |
697 | } | 703 | } |
698 | if (current_is_kswapd()) | 704 | if (current_is_kswapd()) |
699 | __count_vm_events(KSWAPD_INODESTEAL, reap); | 705 | __count_vm_events(KSWAPD_INODESTEAL, reap); |