author     Josef Bacik <josef@toxicpanda.com>  2018-12-04 12:59:04 -0500
committer  Jens Axboe <axboe@kernel.dk>        2018-12-08 00:26:38 -0500
commit     d3fcdff19054575a368dfdac7407cabffea36c43 (patch)
tree       0d65fe0426e7a00cc381cf5f0ed79c898347880d /block
parent     b6c7b58f5fcc2386bddf9852011c42c1d2b83979 (diff)
block: convert io-latency to use rq_qos_wait
Now that we have this common helper, convert io-latency over to use it
as well.
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
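For context, rq_qos_wait() pairs a policy-supplied acquire callback with a
cleanup callback. The sketch below of the helper's contract is an
approximation based on the blk-rq-qos.h declarations introduced earlier in
this series, not a verbatim quote; check the tree for the exact signatures.

    /* Try to take one inflight slot; return true on success. Called on the
     * submit fast path and again each time a waiter is woken. */
    typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);

    /* Drop a surplus slot, e.g. when a waiter was handed a slot by its
     * waker but also managed to acquire one itself. */
    typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

    void rq_qos_wait(struct rq_wait *rqw, void *private_data,
                     acquire_inflight_cb_t *acquire_inflight_cb,
                     cleanup_cb_t *cleanup_cb);

rq_qos_wait() blocks the caller until the acquire callback succeeds, so
io-latency only has to provide the pair of callbacks seen in the diff below
instead of open-coding its own prepare_to_wait_exclusive()/io_schedule()
loop.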
Diffstat (limited to 'block')
 block/blk-iolatency.c | 31 ++++++++-----------------------
 1 file changed, 8 insertions(+), 23 deletions(-)
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 0b14c3d57769..bee092727cad 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -262,15 +262,15 @@ static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
 				   stat->rqs.mean);
 }
 
-static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
-				       wait_queue_entry_t *wait,
-				       bool first_block)
+static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
 {
-	struct rq_wait *rqw = &iolat->rq_wait;
+	atomic_dec(&rqw->inflight);
+	wake_up(&rqw->wait);
+}
 
-	if (first_block && waitqueue_active(&rqw->wait) &&
-	    rqw->wait.head.next != &wait->entry)
-		return false;
+static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
+{
+	struct iolatency_grp *iolat = private_data;
 	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
 }
 
@@ -281,8 +281,6 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
 {
 	struct rq_wait *rqw = &iolat->rq_wait;
 	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
-	DEFINE_WAIT(wait);
-	bool first_block = true;
 
 	if (use_delay)
 		blkcg_schedule_throttle(rqos->q, use_memdelay);
@@ -299,20 +297,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
 		return;
 	}
 
-	if (iolatency_may_queue(iolat, &wait, first_block))
-		return;
-
-	do {
-		prepare_to_wait_exclusive(&rqw->wait, &wait,
-					  TASK_UNINTERRUPTIBLE);
-
-		if (iolatency_may_queue(iolat, &wait, first_block))
-			break;
-		first_block = false;
-		io_schedule();
-	} while (1);
-
-	finish_wait(&rqw->wait, &wait);
+	rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
 }
 
 #define SCALE_DOWN_FACTOR 2