aboutsummaryrefslogtreecommitdiffstats
path: root/fs/mbcache.c
diff options
context:
space:
mode:
authorAndreas Gruenbacher <agruen@suse.de>2010-07-21 13:44:45 -0400
committerAl Viro <viro@zeniv.linux.org.uk>2010-08-09 16:48:47 -0400
commite566d48c9bd56f57e25e855a21e06ca2c2525795 (patch)
treea1520539bab91b693a00eff3ca11340b8ae9038a /fs/mbcache.c
parent2aec7c523291621ebb68ba8e0bd9b52a26bb76ee (diff)
mbcache: fix shrinker function return value
The shrinker function is supposed to return the number of cache entries after shrinking, not before shrinking. Fix that. Based on a patch from Wang Sheng-Hui <crosslonelyover@gmail.com>. Signed-off-by: Andreas Gruenbacher <agruen@suse.de> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/mbcache.c')
-rw-r--r--fs/mbcache.c27
1 file changed, 10 insertions(+), 17 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 8a2cbd823079..cf4e6cdfd15b 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -176,22 +176,12 @@ static int
 mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(free_list);
-	struct list_head *l, *ltmp;
+	struct mb_cache *cache;
+	struct mb_cache_entry *entry, *tmp;
 	int count = 0;
 
-	spin_lock(&mb_cache_spinlock);
-	list_for_each(l, &mb_cache_list) {
-		struct mb_cache *cache =
-			list_entry(l, struct mb_cache, c_cache_list);
-		mb_debug("cache %s (%d)", cache->c_name,
-			 atomic_read(&cache->c_entry_count));
-		count += atomic_read(&cache->c_entry_count);
-	}
 	mb_debug("trying to free %d entries", nr_to_scan);
-	if (nr_to_scan == 0) {
-		spin_unlock(&mb_cache_spinlock);
-		goto out;
-	}
+	spin_lock(&mb_cache_spinlock);
 	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
 		struct mb_cache_entry *ce =
 			list_entry(mb_cache_lru_list.next,
@@ -199,12 +189,15 @@ mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 		list_move_tail(&ce->e_lru_list, &free_list);
 		__mb_cache_entry_unhash(ce);
 	}
+	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
+		mb_debug("cache %s (%d)", cache->c_name,
+			 atomic_read(&cache->c_entry_count));
+		count += atomic_read(&cache->c_entry_count);
+	}
 	spin_unlock(&mb_cache_spinlock);
-	list_for_each_safe(l, ltmp, &free_list) {
-		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
-						   e_lru_list), gfp_mask);
+	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+		__mb_cache_entry_forget(entry, gfp_mask);
 	}
-out:
 	return (count / 100) * sysctl_vfs_cache_pressure;
 }
 