author     Dave Chinner <dchinner@redhat.com>          2013-08-27 20:17:54 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>           2013-09-10 18:56:30 -0400
commit     62d36c77035219ac776d1882ed3a662f2b75f258 (patch)
tree       407e530e870745fb1e2fba163375d4192bb29eca /fs/dcache.c
parent     55f841ce9395a72c6285fbcc4c403c0c786e1c74 (diff)
dcache: convert dentry_stat.nr_unused to per-cpu counters
Before we split up the dcache_lru_lock, the unused dentry counter needs to
be made independent of the global dcache_lru_lock. Convert it to per-cpu
counters to do this.
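As a side note for readers, the per-cpu counter idiom this patch introduces boils down to the minimal sketch below (kernel C, restricted to the APIs the patch itself uses; the count_lru_add()/count_lru_del() wrappers are hypothetical names for illustration only, not part of the patch):

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* one long per CPU; writers never contend across CPUs */
static DEFINE_PER_CPU(long, nr_dentry_unused);

static inline void count_lru_add(void)
{
	this_cpu_inc(nr_dentry_unused);	/* lock-free update of this CPU's slot */
}

static inline void count_lru_del(void)
{
	this_cpu_dec(nr_dentry_unused);
}

/* readers pay the cost: sum the slots of every possible CPU */
static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	/* unsynchronized per-cpu deltas can leave the sum transiently negative */
	return sum < 0 ? 0 : sum;
}

Iterating over all possible CPUs rather than only online ones is deliberate: a CPU that goes offline simply keeps whatever it last counted, so no hotplug callback is needed to fold its contribution back in. The global dentry_stat.nr_unused is then refreshed from these counters only when proc_nr_dentry() runs, i.e. when the sysctl is read.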
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c  30
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 6ef1c2e1bbc4..03161240e744 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -147,8 +147,22 @@ struct dentry_stat_t dentry_stat = {
 };
 
 static DEFINE_PER_CPU(long, nr_dentry);
+static DEFINE_PER_CPU(long, nr_dentry_unused);
 
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+
+/*
+ * Here we resort to our own counters instead of using generic per-cpu counters
+ * for consistency with what the vfs inode code does. We are expected to harvest
+ * better code and performance by having our own specialized counters.
+ *
+ * Please note that the loop is done over all possible CPUs, not over all online
+ * CPUs. The reason for this is that we don't want to play games with CPUs going
+ * on and off. If one of them goes off, we will just keep their counters.
+ *
+ * glommer: See cffbc8a for details, and if you ever intend to change this,
+ * please update all vfs counters to match.
+ */
 static long get_nr_dentry(void)
 {
 	int i;
@@ -158,10 +172,20 @@ static long get_nr_dentry(void)
 	return sum < 0 ? 0 : sum;
 }
 
+static long get_nr_dentry_unused(void)
+{
+	int i;
+	long sum = 0;
+	for_each_possible_cpu(i)
+		sum += per_cpu(nr_dentry_unused, i);
+	return sum < 0 ? 0 : sum;
+}
+
 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
 		   size_t *lenp, loff_t *ppos)
 {
 	dentry_stat.nr_dentry = get_nr_dentry();
+	dentry_stat.nr_unused = get_nr_dentry_unused();
 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }
 #endif
@@ -342,7 +366,7 @@ static void dentry_lru_add(struct dentry *dentry)
 		dentry->d_flags |= DCACHE_LRU_LIST;
 		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 		dentry->d_sb->s_nr_dentry_unused++;
-		dentry_stat.nr_unused++;
+		this_cpu_inc(nr_dentry_unused);
 		spin_unlock(&dcache_lru_lock);
 	}
 }
@@ -352,7 +376,7 @@ static void __dentry_lru_del(struct dentry *dentry)
 	list_del_init(&dentry->d_lru);
 	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 	dentry->d_sb->s_nr_dentry_unused--;
-	dentry_stat.nr_unused--;
+	this_cpu_dec(nr_dentry_unused);
 }
 
 /*
@@ -374,7 +398,7 @@ static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
 		dentry->d_flags |= DCACHE_LRU_LIST;
 		list_add_tail(&dentry->d_lru, list);
 		dentry->d_sb->s_nr_dentry_unused++;
-		dentry_stat.nr_unused++;
+		this_cpu_inc(nr_dentry_unused);
 	} else {
 		list_move_tail(&dentry->d_lru, list);
 	}