Diffstat (limited to 'fs/dcache.c')
 fs/dcache.c | 30 +++++++++++++++++++++++++++---
 1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 6ef1c2e1bbc4..03161240e744 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -147,8 +147,22 @@ struct dentry_stat_t dentry_stat = {
 };
 
 static DEFINE_PER_CPU(long, nr_dentry);
+static DEFINE_PER_CPU(long, nr_dentry_unused);
 
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+
+/*
+ * Here we resort to our own counters instead of using generic per-cpu counters
+ * for consistency with what the vfs inode code does. We are expected to harvest
+ * better code and performance by having our own specialized counters.
+ *
+ * Please note that the loop is done over all possible CPUs, not over all online
+ * CPUs. The reason for this is that we don't want to play games with CPUs going
+ * on and off. If one of them goes off, we will just keep their counters.
+ *
+ * glommer: See cffbc8a for details, and if you ever intend to change this,
+ * please update all vfs counters to match.
+ */
 static long get_nr_dentry(void)
 {
 	int i;
@@ -158,10 +172,20 @@ static long get_nr_dentry(void)
 	return sum < 0 ? 0 : sum;
 }
 
+static long get_nr_dentry_unused(void)
+{
+	int i;
+	long sum = 0;
+	for_each_possible_cpu(i)
+		sum += per_cpu(nr_dentry_unused, i);
+	return sum < 0 ? 0 : sum;
+}
+
 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
 		   size_t *lenp, loff_t *ppos)
 {
 	dentry_stat.nr_dentry = get_nr_dentry();
+	dentry_stat.nr_unused = get_nr_dentry_unused();
 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }
 #endif
@@ -342,7 +366,7 @@ static void dentry_lru_add(struct dentry *dentry)
 		dentry->d_flags |= DCACHE_LRU_LIST;
 		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 		dentry->d_sb->s_nr_dentry_unused++;
-		dentry_stat.nr_unused++;
+		this_cpu_inc(nr_dentry_unused);
 		spin_unlock(&dcache_lru_lock);
 	}
 }
@@ -352,7 +376,7 @@ static void __dentry_lru_del(struct dentry *dentry)
 	list_del_init(&dentry->d_lru);
 	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
 	dentry->d_sb->s_nr_dentry_unused--;
-	dentry_stat.nr_unused--;
+	this_cpu_dec(nr_dentry_unused);
 }
 
 /*
@@ -374,7 +398,7 @@ static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
 		dentry->d_flags |= DCACHE_LRU_LIST;
 		list_add_tail(&dentry->d_lru, list);
 		dentry->d_sb->s_nr_dentry_unused++;
-		dentry_stat.nr_unused++;
+		this_cpu_inc(nr_dentry_unused);
 	} else {
 		list_move_tail(&dentry->d_lru, list);
 	}
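
For reference, a minimal sketch of the per-cpu counter pattern this patch applies. It is illustrative only and not part of the patch; the counter name nr_example and the helpers around it are hypothetical, and it assumes a kernel build context where <linux/percpu.h> and <linux/cpumask.h> are available.

#include <linux/percpu.h>	/* DEFINE_PER_CPU, this_cpu_inc, per_cpu */
#include <linux/cpumask.h>	/* for_each_possible_cpu */

static DEFINE_PER_CPU(long, nr_example);	/* hypothetical counter */

static inline void example_inc(void)
{
	/* update only this CPU's slot; no cross-CPU contention or locking */
	this_cpu_inc(nr_example);
}

static long get_nr_example(void)
{
	int i;
	long sum = 0;

	/*
	 * Sum over all *possible* CPUs, not just online ones, so counts
	 * accumulated on a CPU that later went offline are not lost.
	 */
	for_each_possible_cpu(i)
		sum += per_cpu(nr_example, i);
	return sum < 0 ? 0 : sum;
}

The fast path (increment/decrement) stays cheap and scalable, while the comparatively rare read in proc_nr_dentry() pays the cost of walking every possible CPU, mirroring what the patch does for nr_dentry_unused.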