author     Dave Chinner <dchinner@redhat.com>        2011-07-08 00:14:37 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>         2011-07-20 01:44:32 -0400
commit     e9299f5058595a655c3b207cda9635e28b9197e6
tree       b31a4dc5cab98ee1701313f45e92e583c2d76f63  /mm/vmscan.c
parent     3567b59aa80ac4417002bf58e35dce5c777d4164
vmscan: add customisable shrinker batch size
For shrinkers that have their own cond_resched* calls, having
shrink_slab break the work down into small batches is not
particularly efficient. Add a custom batchsize field to the struct
shrinker so that shrinkers can use a larger batch size if they
desire.
A value of zero (uninitialised) means "use the default", so
behaviour is unchanged by this patch.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
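For illustration only (not part of this commit): a minimal sketch of how a shrinker of this era might opt into a larger batch, assuming the 3.0-era callback API (->shrink taking a struct shrink_control) and the new ->batch field added by this series. All my_cache_* names and the batch value of 1024 are hypothetical.

    /*
     * Hypothetical example, not from the patch: an imaginary cache whose
     * shrink callback yields the CPU itself, so it asks shrink_slab() for
     * larger batches via the new ->batch field (0 keeps the default).
     */
    #include <linux/mm.h>       /* struct shrinker lived here at the time */
    #include <linux/sched.h>    /* cond_resched() */
    #include <linux/atomic.h>

    /* Made-up object counter standing in for a real cache. */
    static atomic_long_t my_cache_objects = ATOMIC_LONG_INIT(0);

    static int my_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
    {
            long nr = sc->nr_to_scan;

            /* Free up to nr objects, rescheduling as we go. */
            while (nr-- > 0 && atomic_long_read(&my_cache_objects) > 0) {
                    atomic_long_dec(&my_cache_objects);
                    cond_resched();
            }

            /* Old-style contract: report the remaining object count. */
            return (int)atomic_long_read(&my_cache_objects);
    }

    static struct shrinker my_cache_shrinker = {
            .shrink = my_cache_shrink,
            .seeks  = DEFAULT_SEEKS,
            .batch  = 1024,     /* scan in batches of 1024 instead of SHRINK_BATCH (128) */
    };

    /* register_shrinker(&my_cache_shrinker) would be called at init time. */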
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  11
1 files changed, 6 insertions, 5 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 387422470c95..febbc044e792 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -253,6 +253,8 @@ unsigned long shrink_slab(struct shrink_control *shrink,
                 int shrink_ret = 0;
                 long nr;
                 long new_nr;
+                long batch_size = shrinker->batch ? shrinker->batch
+                                                  : SHRINK_BATCH;
 
                 /*
                  * copy the current shrinker scan count into a local variable
@@ -303,19 +305,18 @@ unsigned long shrink_slab(struct shrink_control *shrink,
                                         nr_pages_scanned, lru_pages,
                                         max_pass, delta, total_scan);
 
-                while (total_scan >= SHRINK_BATCH) {
-                        long this_scan = SHRINK_BATCH;
+                while (total_scan >= batch_size) {
                         int nr_before;
 
                         nr_before = do_shrinker_shrink(shrinker, shrink, 0);
                         shrink_ret = do_shrinker_shrink(shrinker, shrink,
-                                                        this_scan);
+                                                        batch_size);
                         if (shrink_ret == -1)
                                 break;
                         if (shrink_ret < nr_before)
                                 ret += nr_before - shrink_ret;
-                        count_vm_events(SLABS_SCANNED, this_scan);
-                        total_scan -= this_scan;
+                        count_vm_events(SLABS_SCANNED, batch_size);
+                        total_scan -= batch_size;
 
                         cond_resched();
                 }