author		Dave Chinner <dchinner@redhat.com>	2013-08-27 20:17:56 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-09-10 18:56:30 -0400
commit		24f7c6b981fb70084757382da464ea85d72af300
tree		641ec828955f54b13641fadcee35b530989349a6 /mm
parent		dd1f6b2e43a53ee58eb87d5e623cf44e277d005d
mm: new shrinker API
The current shrinker callout API uses a single shrinker call for multiple
functions.  To determine the function, a special magical value is passed in
a parameter to change the behaviour.  This complicates the implementation
and return value specification for the different behaviours.

Separate the two different behaviours into separate operations: one to
return a count of freeable objects in the cache, and another to scan a
certain number of objects in the cache for freeing.  In defining these new
operations, ensure the return values and resultant behaviours are clearly
defined and documented.

Modify shrink_slab() to use the new API and implement the callouts for all
the existing shrinkers.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@parallels.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
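[Editor's illustration, not part of the commit] To make the split concrete,
here is a minimal sketch of a shrinker written against the two new
operations.  The my_cache_* names are hypothetical placeholders; the
count_objects/scan_objects callbacks, sc->nr_to_scan, SHRINK_STOP and
DEFAULT_SEEKS come from include/linux/shrinker.h as modified by this patch
(the header change falls outside this mm-only diffstat).

	/* Hypothetical cache hooked up to the new shrinker API. */
	static unsigned long my_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc)
	{
		/* Count only: report how many objects could be freed. */
		return my_cache_nr_freeable();	/* hypothetical counter */
	}

	static unsigned long my_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc)
	{
		/* Scan pass: try to free up to sc->nr_to_scan objects. */
		unsigned long freed = my_cache_free(sc->nr_to_scan);

		/* SHRINK_STOP tells shrink_slab() to stop scanning this cache. */
		return freed ? freed : SHRINK_STOP;
	}

	static struct shrinker my_shrinker = {
		.count_objects	= my_cache_count,
		.scan_objects	= my_cache_scan,
		.seeks		= DEFAULT_SEEKS,
	};

A cache would register this with register_shrinker(&my_shrinker);
shrink_slab() then calls ->count_objects once to size the pass and
->scan_objects in batch_size chunks, as the loop in the diff below shows.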
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	60
1 file changed, 40 insertions(+), 20 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2cff0d491c6d..4d4e859b4b9c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -205,19 +205,24 @@ static inline int do_shrinker_shrink(struct shrinker *shrinker,
  *
  * Returns the number of slab objects which we shrunk.
  */
-unsigned long shrink_slab(struct shrink_control *shrink,
+unsigned long shrink_slab(struct shrink_control *shrinkctl,
 			  unsigned long nr_pages_scanned,
 			  unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
-	unsigned long ret = 0;
+	unsigned long freed = 0;
 
 	if (nr_pages_scanned == 0)
 		nr_pages_scanned = SWAP_CLUSTER_MAX;
 
 	if (!down_read_trylock(&shrinker_rwsem)) {
-		/* Assume we'll be able to shrink next time */
-		ret = 1;
+		/*
+		 * If we would return 0, our callers would understand that we
+		 * have nothing else to shrink and give up trying. By returning
+		 * 1 we keep it going and assume we'll be able to shrink next
+		 * time.
+		 */
+		freed = 1;
 		goto out;
 	}
 
@@ -225,14 +230,16 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		unsigned long long delta;
 		long total_scan;
 		long max_pass;
-		int shrink_ret = 0;
 		long nr;
 		long new_nr;
 		long batch_size = shrinker->batch ? shrinker->batch
 						  : SHRINK_BATCH;
 
-		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
-		if (max_pass <= 0)
+		if (shrinker->count_objects)
+			max_pass = shrinker->count_objects(shrinker, shrinkctl);
+		else
+			max_pass = do_shrinker_shrink(shrinker, shrinkctl, 0);
+		if (max_pass == 0)
 			continue;
 
 		/*
@@ -248,8 +255,8 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		do_div(delta, lru_pages + 1);
 		total_scan += delta;
 		if (total_scan < 0) {
-			printk(KERN_ERR "shrink_slab: %pF negative objects to "
-			       "delete nr=%ld\n",
+			printk(KERN_ERR
+			"shrink_slab: %pF negative objects to delete nr=%ld\n",
 			       shrinker->shrink, total_scan);
 			total_scan = max_pass;
 		}
@@ -277,20 +284,33 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		if (total_scan > max_pass * 2)
 			total_scan = max_pass * 2;
 
-		trace_mm_shrink_slab_start(shrinker, shrink, nr,
+		trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
 					nr_pages_scanned, lru_pages,
 					max_pass, delta, total_scan);
 
 		while (total_scan >= batch_size) {
-			int nr_before;
 
-			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
-			shrink_ret = do_shrinker_shrink(shrinker, shrink,
-							batch_size);
-			if (shrink_ret == -1)
-				break;
-			if (shrink_ret < nr_before)
-				ret += nr_before - shrink_ret;
+			if (shrinker->scan_objects) {
+				unsigned long ret;
+				shrinkctl->nr_to_scan = batch_size;
+				ret = shrinker->scan_objects(shrinker, shrinkctl);
+
+				if (ret == SHRINK_STOP)
+					break;
+				freed += ret;
+			} else {
+				int nr_before;
+				long ret;
+
+				nr_before = do_shrinker_shrink(shrinker, shrinkctl, 0);
+				ret = do_shrinker_shrink(shrinker, shrinkctl,
+								batch_size);
+				if (ret == -1)
+					break;
+				if (ret < nr_before)
+					freed += nr_before - ret;
+			}
+
 			count_vm_events(SLABS_SCANNED, batch_size);
 			total_scan -= batch_size;
 
@@ -308,12 +328,12 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		else
 			new_nr = atomic_long_read(&shrinker->nr_in_batch);
 
-		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
+		trace_mm_shrink_slab_end(shrinker, freed, nr, new_nr);
 	}
 	up_read(&shrinker_rwsem);
 out:
 	cond_resched();
-	return ret;
+	return freed;
 }
 
 static inline int is_page_cache_freeable(struct page *page)