path: root/fs/mbcache.c
author	Dave Chinner <dchinner@redhat.com>	2013-08-27 20:18:09 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-09-10 18:56:31 -0400
commit	1ab6c4997e04a00c50c6d786c2f046adc0d1f5de (patch)
tree	55561fc74c062a8ed0e03fe56f54d7db9cfd9e12 /fs/mbcache.c
parent	35163417fb7a55a24b6b0ebb102e9991adf309aa (diff)
fs: convert fs shrinkers to new scan/count API
Convert the filesystem shrinkers to use the new API, and standardise some
of the behaviours of the shrinkers at the same time.  For example,
nr_to_scan means the number of objects to scan, not the number of objects
to free.

I refactored the CIFS idmap shrinker a little - it really needs to be
broken up into a shrinker per tree and keep an item count with the tree
root so that we don't need to walk the tree every time the shrinker needs
to count the number of objects in the tree (i.e. all the time under
memory pressure).

[glommer@openvz.org: fixes for ext4, ubifs, nfs, cifs and glock. Fixes are needed mainly due to new code merged in the tree]
[assorted fixes folded in]

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
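For readers new to the split API: under the old scheme a single ->shrink()
callback did double duty, returning the cache size when called with
nr_to_scan == 0 and freeing objects otherwise; the new scheme separates
those duties into two callbacks.  A minimal sketch of the new shape follows
(illustrative only, not code from this patch; the example_* names and the
object counter are hypothetical):

	#include <linux/shrinker.h>

	static atomic_long_t example_nr_objects;	/* hypothetical object count */

	/* Report how many objects could be freed; must be cheap. */
	static unsigned long
	example_count(struct shrinker *shrink, struct shrink_control *sc)
	{
		return atomic_long_read(&example_nr_objects);
	}

	/* Try to free up to sc->nr_to_scan objects; report how many were. */
	static unsigned long
	example_scan(struct shrinker *shrink, struct shrink_control *sc)
	{
		unsigned long freed = 0;

		/* walk an LRU here, freeing entries and bumping freed */
		return freed;	/* or SHRINK_STOP if reclaim cannot make progress */
	}

	static struct shrinker example_shrinker = {
		.count_objects	= example_count,
		.scan_objects	= example_scan,
		.seeks		= DEFAULT_SEEKS,
	};

The mbcache conversion in the diff below follows exactly this pattern.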
Diffstat (limited to 'fs/mbcache.c')
-rw-r--r--	fs/mbcache.c	47
1 file changed, 26 insertions(+), 21 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 5eb04767cb29..e519e45bf673 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -86,18 +86,6 @@ static LIST_HEAD(mb_cache_list);
 static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
 
-/*
- * What the mbcache registers as to get shrunk dynamically.
- */
-
-static int mb_cache_shrink_fn(struct shrinker *shrink,
-			      struct shrink_control *sc);
-
-static struct shrinker mb_cache_shrinker = {
-	.shrink = mb_cache_shrink_fn,
-	.seeks = DEFAULT_SEEKS,
-};
-
 static inline int
 __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
 {
@@ -151,7 +139,7 @@ forget:
 
 
 /*
- * mb_cache_shrink_fn()  memory pressure callback
+ * mb_cache_shrink_scan()  memory pressure callback
 *
  * This function is called by the kernel memory management when memory
  * gets low.
@@ -159,17 +147,16 @@ forget:
 * @shrink: (ignored)
  * @sc: shrink_control passed from reclaim
  *
- * Returns the number of objects which are present in the cache.
+ * Returns the number of objects freed.
 */
-static int
-mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	LIST_HEAD(free_list);
-	struct mb_cache *cache;
 	struct mb_cache_entry *entry, *tmp;
-	int count = 0;
 	int nr_to_scan = sc->nr_to_scan;
 	gfp_t gfp_mask = sc->gfp_mask;
+	unsigned long freed = 0;
 
 	mb_debug("trying to free %d entries", nr_to_scan);
 	spin_lock(&mb_cache_spinlock);
@@ -179,19 +166,37 @@ mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
 				struct mb_cache_entry, e_lru_list);
 		list_move_tail(&ce->e_lru_list, &free_list);
 		__mb_cache_entry_unhash(ce);
+		freed++;
+	}
+	spin_unlock(&mb_cache_spinlock);
+	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+		__mb_cache_entry_forget(entry, gfp_mask);
 	}
+	return freed;
+}
+
+static unsigned long
+mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	struct mb_cache *cache;
+	unsigned long count = 0;
+
+	spin_lock(&mb_cache_spinlock);
 	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
 		mb_debug("cache %s (%d)", cache->c_name,
 			 atomic_read(&cache->c_entry_count));
 		count += atomic_read(&cache->c_entry_count);
 	}
 	spin_unlock(&mb_cache_spinlock);
-	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
-		__mb_cache_entry_forget(entry, gfp_mask);
-	}
+
 	return vfs_pressure_ratio(count);
 }
 
+static struct shrinker mb_cache_shrinker = {
+	.count_objects = mb_cache_shrink_count,
+	.scan_objects = mb_cache_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+};
 
 /*
  * mb_cache_create()  create a new cache