author	Dave Chinner <dchinner@redhat.com>	2011-07-08 04:14:37 (GMT)
committer	Al Viro <viro@zeniv.linux.org.uk>	2011-07-20 05:44:32 (GMT)
commit	e9299f5058595a655c3b207cda9635e28b9197e6 (patch)
tree	b31a4dc5cab98ee1701313f45e92e583c2d76f63 /mm
parent	3567b59aa80ac4417002bf58e35dce5c777d4164 (diff)
download	linux-fsl-qoriq-e9299f5058595a655c3b207cda9635e28b9197e6.tar.xz
vmscan: add customisable shrinker batch size
For shrinkers that have their own cond_resched* calls, having shrink_slab break the work down into small batches is not particularly efficient. Add a custom batchsize field to the struct shrinker so that shrinkers can use a larger batch size if they desire. A value of zero (uninitialised) means "use the default", so behaviour is unchanged by this patch.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
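[Illustrative sketch, not part of this patch: how a shrinker might opt into the new field. A cache whose ->shrink() callback already reschedules internally could register with a larger batch so shrink_slab() makes fewer, bigger calls into it. The names my_cache_shrink, my_cache_shrinker, my_cache_prune and my_cache_count are hypothetical; struct shrinker, shrink_control, DEFAULT_SEEKS and register_shrinker() are as in this kernel.]

	/*
	 * Hypothetical example: opt into a larger batch size.
	 * A .batch of 0 keeps the default SHRINK_BATCH behaviour.
	 */
	static int my_cache_shrink(struct shrinker *shrink,
				   struct shrink_control *sc)
	{
		if (sc->nr_to_scan) {
			/* free up to sc->nr_to_scan objects, resched as we go */
			my_cache_prune(sc->nr_to_scan);	/* hypothetical helper */
			cond_resched();
		}
		return my_cache_count();	/* hypothetical object count */
	}

	static struct shrinker my_cache_shrinker = {
		.shrink	= my_cache_shrink,
		.seeks	= DEFAULT_SEEKS,
		.batch	= 1024,		/* larger than the default batch of 128 */
	};

	/* at init time: register_shrinker(&my_cache_shrinker); */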
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3874224..febbc04 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -253,6 +253,8 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		int shrink_ret = 0;
 		long nr;
 		long new_nr;
+		long batch_size = shrinker->batch ? shrinker->batch
+						  : SHRINK_BATCH;
 
 		/*
 		 * copy the current shrinker scan count into a local variable
@@ -303,19 +305,18 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 					nr_pages_scanned, lru_pages,
 					max_pass, delta, total_scan);
 
-		while (total_scan >= SHRINK_BATCH) {
-			long this_scan = SHRINK_BATCH;
+		while (total_scan >= batch_size) {
 			int nr_before;
 
 			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
 			shrink_ret = do_shrinker_shrink(shrinker, shrink,
-							this_scan);
+							batch_size);
 			if (shrink_ret == -1)
 				break;
 			if (shrink_ret < nr_before)
 				ret += nr_before - shrink_ret;
-			count_vm_events(SLABS_SCANNED, this_scan);
-			total_scan -= this_scan;
+			count_vm_events(SLABS_SCANNED, batch_size);
+			total_scan -= batch_size;
 
 			cond_resched();
 		}