path: root/block/cfq-iosched.c
author    Jens Axboe <axboe@fb.com>  2016-11-09 14:38:14 -0500
committer Jens Axboe <axboe@fb.com>  2016-11-10 15:53:40 -0500
commit    87760e5eef359788047d6fd54fc12eec74ce0d27 (patch)
tree      0c394ea517cc093d8fe837ad5a7201d0d30c7afe /block/cfq-iosched.c
parent    e34cbd307477ae07c5d8a8d0bd15e65a9ddaba5c (diff)
block: hook up writeback throttling
Enable throttling of buffered writeback to make it a lot smoother, with much less impact on other system activity. Background writeback should be, by definition, background activity. The fact that we flush huge bundles of it at a time means that it can have a heavy impact on foreground workloads, which isn't ideal. We can't easily limit the sizes of the writes we do, since that would affect file system layout in the presence of delayed allocation. So instead, throttle back buffered writeback unless someone is waiting for it.

The algorithm for deciding when to throttle takes its inspiration from the CoDel network scheduling algorithm. Like CoDel, blk-wb monitors the minimum latencies of requests over a window of time. If, within that window, the minimum latency of any request exceeds a given target, a scale count is incremented and the queue depth is shrunk. The next monitoring window is shrunk accordingly. Unlike CoDel, if we hit a window that exhibits good behavior, we simply decrement the scale count and re-calculate the limits for that scale value. This prevents us from oscillating between a close-to-ideal value and the maximum all the time, and instead keeps us in the windows where we get good behavior.

Unlike CoDel, blk-wb allows the scale count to go negative. This happens if we primarily have writes going on. Unlike positive scale counts, a negative count doesn't change the size of the monitoring window. When the heavy writers finish, blk-wb quickly snaps back to its stable state of a zero scale count.

The patch registers a sysfs entry, 'wb_lat_usec', which sets the latency target to be met. It defaults to 2 msec for non-rotational storage and 75 msec for rotational storage. Setting this value to '0' disables blk-wb. Generally, a user would not have to touch this setting.

We don't enable WBT on devices that are managed by CFQ and have a non-root block cgroup attached. If we have a proportional share setup on that particular disk, the wbt throttling would interfere with it. We don't have a strong need for wbt in that case, since we rely on CFQ to do the throttling for us.

Signed-off-by: Jens Axboe <axboe@fb.com>
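To make the window/scale mechanism described above more concrete, the following standalone C sketch models the decision logic under simplified assumptions. It is not the kernel's blk-wbt code: the wb_state struct, its field names, the constants, and the reads_pending flag are all invented here purely for illustration.

/*
 * Illustrative userspace model of the window/scale logic described above.
 * This is NOT the kernel's blk-wbt implementation.
 */
#include <stdio.h>
#include <stdbool.h>

struct wb_state {
	unsigned int max_depth;		/* maximum writeback queue depth */
	unsigned int cur_depth;		/* currently allowed depth */
	int scale_step;			/* > 0: throttled, < 0: writes only */
	unsigned long win_usec;		/* current monitoring window */
	unsigned long base_win_usec;	/* default monitoring window */
	unsigned long target_usec;	/* latency target (cf. wb_lat_usec) */
};

/* Each positive scale step halves the allowed depth, never below 1. */
static void wb_update_limits(struct wb_state *wb)
{
	unsigned int depth = wb->max_depth;
	int step;

	for (step = 0; step < wb->scale_step && depth > 1; step++)
		depth /= 2;
	wb->cur_depth = depth;
}

/* Called at the end of a monitoring window with the minimum latency seen. */
static void wb_window_done(struct wb_state *wb, unsigned long min_lat_usec,
			   bool reads_pending)
{
	if (min_lat_usec > wb->target_usec) {
		/* Bad window: throttle harder, shrink the next window. */
		wb->scale_step++;
		if (wb->win_usec > wb->base_win_usec / 4)
			wb->win_usec /= 2;
	} else if (!reads_pending) {
		/* Only writes going on: the step may go negative, but the
		 * window size stays the same. */
		wb->scale_step--;
	} else if (wb->scale_step > 0) {
		/* Good window: step back toward the stable zero state rather
		 * than jumping straight to the maximum depth. */
		wb->scale_step--;
		wb->win_usec = wb->base_win_usec;
	} else if (wb->scale_step < 0) {
		/* Heavy writers finished: snap back to a zero scale count. */
		wb->scale_step = 0;
	}
	wb_update_limits(wb);
}

int main(void)
{
	struct wb_state wb = {
		.max_depth = 64, .cur_depth = 64,
		.win_usec = 100000, .base_win_usec = 100000,
		.target_usec = 2000,	/* 2 msec, the non-rotational default */
	};

	wb_window_done(&wb, 5000, true);	/* latency above target */
	printf("bad window:  depth %u, step %d\n", wb.cur_depth, wb.scale_step);
	wb_window_done(&wb, 500, true);		/* latency back below target */
	printf("good window: depth %u, step %d\n", wb.cur_depth, wb.scale_step);
	return 0;
}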
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 61010511c5a0..e280d08ef6d7 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -16,6 +16,7 @@
 #include <linux/blktrace_api.h>
 #include <linux/blk-cgroup.h>
 #include "blk.h"
+#include "blk-wbt.h"
 
 /*
  * tunables
@@ -3762,9 +3763,11 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 	struct cfq_data *cfqd = cic_to_cfqd(cic);
 	struct cfq_queue *cfqq;
 	uint64_t serial_nr;
+	bool nonroot_cg;
 
 	rcu_read_lock();
 	serial_nr = bio_blkcg(bio)->css.serial_nr;
+	nonroot_cg = bio_blkcg(bio) != &blkcg_root;
 	rcu_read_unlock();
 
 	/*
@@ -3775,6 +3778,17 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 		return;
 
 	/*
+	 * If we have a non-root cgroup, we can depend on that to
+	 * do proper throttling of writes. Turn off wbt for that
+	 * case.
+	 */
+	if (nonroot_cg) {
+		struct request_queue *q = cfqd->queue;
+
+		wbt_disable(q->rq_wb);
+	}
+
+	/*
 	 * Drop reference to queues. New queues will be assigned in new
 	 * group upon arrival of fresh requests.
 	 */
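The wbt_disable() call in the hunk above is what switches throttling off for the queue once a non-root cgroup is seen, deferring to CFQ's proportional-share control instead. Continuing the illustrative wb_state sketch from earlier (and again not the kernel implementation, which operates on struct rq_wb), a disable hook along those lines might simply clear the latency target and reset the scaling state:

/* Hypothetical disable hook for the wb_state sketch shown earlier.
 * Dropping the target to 0 mirrors the "wb_lat_usec = 0 disables blk-wb"
 * behavior described in the commit message. */
static void wb_disable(struct wb_state *wb)
{
	if (!wb)
		return;
	wb->target_usec = 0;		/* no latency target: never throttle */
	wb->scale_step = 0;
	wb->cur_depth = wb->max_depth;	/* restore full queue depth */
}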