author    Josef Bacik <jbacik@fb.com>   2018-07-03 11:32:35 -0400
committer Jens Axboe <axboe@kernel.dk>  2018-07-09 11:07:54 -0400
commit    a79050434b45959f397042080fd1d70ffa9bd9df (patch)
tree      d5689153d497925d326a8b7e9963f4c3f88685ea /block/blk-wbt.c
parent    2ecbf456352d0699f51b4c6d70ea5bf29766579c (diff)
blk-rq-qos: refactor out common elements of blk-wbt
blkcg-qos is going to do essentially what wbt does, only on a cgroup
basis. Break out the common code that will be shared between blkcg-qos
and wbt into blk-rq-qos.* so they can both utilize the same
infrastructure.

Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
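For orientation, here is the shape of the new hook-up, condensed from
the hunks below. Note that struct rq_qos, rq_qos_add() and the RQWB()
accessor are defined in the new blk-rq-qos.* and blk-wbt.h files, which
fall outside this diffstat. The gist: wbt no longer hangs a bare
struct rq_wb off the queue; it embeds a struct rq_qos and registers an
ops table that the block layer invokes at each request life-cycle event:

	static struct rq_qos_ops wbt_rqos_ops = {
		.throttle = wbt_wait,	/* may sleep at bio submission */
		.issue    = wbt_issue,	/* request handed to the device */
		.requeue  = wbt_requeue,
		.done     = wbt_done,	/* completion and merge accounting */
		.cleanup  = __wbt_done,
		.exit     = wbt_exit,
	};

	/* in wbt_init(), replacing the old q->rq_wb assignment: */
	rwb->rqos.id = RQ_QOS_WBT;
	rwb->rqos.ops = &wbt_rqos_ops;
	rwb->rqos.q = q;
	rq_qos_add(q, &rwb->rqos);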
Diffstat (limited to 'block/blk-wbt.c')
-rw-r--r--	block/blk-wbt.c | 326
1 file changed, 144 insertions(+), 182 deletions(-)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 4f89b28fa652..6fe20fb823e4 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -25,6 +25,7 @@
 #include <linux/swap.h>
 
 #include "blk-wbt.h"
+#include "blk-rq-qos.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/wbt.h>
@@ -78,28 +79,6 @@ static inline bool rwb_enabled(struct rq_wb *rwb)
 	return rwb && rwb->wb_normal != 0;
 }
 
-/*
- * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
- * false if 'v' + 1 would be bigger than 'below'.
- */
-static bool atomic_inc_below(atomic_t *v, int below)
-{
-	int cur = atomic_read(v);
-
-	for (;;) {
-		int old;
-
-		if (cur >= below)
-			return false;
-		old = atomic_cmpxchg(v, cur, cur + 1);
-		if (old == cur)
-			break;
-		cur = old;
-	}
-
-	return true;
-}
-
 static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
 {
 	if (rwb_enabled(rwb)) {
@@ -116,7 +95,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
  */
 static bool wb_recent_wait(struct rq_wb *rwb)
 {
-	struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;
+	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
 
 	return time_before(jiffies, wb->dirty_sleep + HZ);
 }
@@ -144,8 +123,9 @@ static void rwb_wake_all(struct rq_wb *rwb)
 	}
 }
 
-void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
+static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
 {
+	struct rq_wb *rwb = RQWB(rqos);
 	struct rq_wait *rqw;
 	int inflight, limit;
 
@@ -194,10 +174,9 @@ void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
  * Called on completion of a request. Note that it's also called when
  * a request is merged, when the request gets freed.
  */
-void wbt_done(struct rq_wb *rwb, struct request *rq)
+static void wbt_done(struct rq_qos *rqos, struct request *rq)
 {
-	if (!rwb)
-		return;
+	struct rq_wb *rwb = RQWB(rqos);
 
 	if (!wbt_is_tracked(rq)) {
 		if (rwb->sync_cookie == rq) {
@@ -209,72 +188,11 @@ void wbt_done(struct rq_wb *rwb, struct request *rq)
 		wb_timestamp(rwb, &rwb->last_comp);
 	} else {
 		WARN_ON_ONCE(rq == rwb->sync_cookie);
-		__wbt_done(rwb, wbt_flags(rq));
+		__wbt_done(rqos, wbt_flags(rq));
 	}
 	wbt_clear_state(rq);
 }
 
-/*
- * Return true, if we can't increase the depth further by scaling
- */
-static bool calc_wb_limits(struct rq_wb *rwb)
-{
-	unsigned int depth;
-	bool ret = false;
-
-	if (!rwb->min_lat_nsec) {
-		rwb->wb_max = rwb->wb_normal = rwb->wb_background = 0;
-		return false;
-	}
-
-	/*
-	 * For QD=1 devices, this is a special case. It's important for those
-	 * to have one request ready when one completes, so force a depth of
-	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
-	 * since the device can't have more than that in flight. If we're
-	 * scaling down, then keep a setting of 1/1/1.
-	 */
-	if (rwb->queue_depth == 1) {
-		if (rwb->scale_step > 0)
-			rwb->wb_max = rwb->wb_normal = 1;
-		else {
-			rwb->wb_max = rwb->wb_normal = 2;
-			ret = true;
-		}
-		rwb->wb_background = 1;
-	} else {
-		/*
-		 * scale_step == 0 is our default state. If we have suffered
-		 * latency spikes, step will be > 0, and we shrink the
-		 * allowed write depths. If step is < 0, we're only doing
-		 * writes, and we allow a temporarily higher depth to
-		 * increase performance.
-		 */
-		depth = min_t(unsigned int, RWB_DEF_DEPTH, rwb->queue_depth);
-		if (rwb->scale_step > 0)
-			depth = 1 + ((depth - 1) >> min(31, rwb->scale_step));
-		else if (rwb->scale_step < 0) {
-			unsigned int maxd = 3 * rwb->queue_depth / 4;
-
-			depth = 1 + ((depth - 1) << -rwb->scale_step);
-			if (depth > maxd) {
-				depth = maxd;
-				ret = true;
-			}
-		}
-
-		/*
-		 * Set our max/normal/bg queue depths based on how far
-		 * we have scaled down (->scale_step).
-		 */
-		rwb->wb_max = depth;
-		rwb->wb_normal = (rwb->wb_max + 1) / 2;
-		rwb->wb_background = (rwb->wb_max + 3) / 4;
-	}
-
-	return ret;
-}
-
 static inline bool stat_sample_valid(struct blk_rq_stat *stat)
 {
 	/*
@@ -307,7 +225,8 @@ enum {
 
 static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 {
-	struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
+	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+	struct rq_depth *rqd = &rwb->rq_depth;
 	u64 thislat;
 
 	/*
@@ -351,7 +270,7 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 		return LAT_EXCEEDED;
 	}
 
-	if (rwb->scale_step)
+	if (rqd->scale_step)
 		trace_wbt_stat(bdi, stat);
 
 	return LAT_OK;
@@ -359,58 +278,48 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 
 static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
 {
-	struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
+	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+	struct rq_depth *rqd = &rwb->rq_depth;
 
-	trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
-			rwb->wb_background, rwb->wb_normal, rwb->wb_max);
+	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
+			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
 }
 
-static void scale_up(struct rq_wb *rwb)
+static void calc_wb_limits(struct rq_wb *rwb)
 {
-	/*
-	 * Hit max in previous round, stop here
-	 */
-	if (rwb->scaled_max)
-		return;
+	if (rwb->min_lat_nsec == 0) {
+		rwb->wb_normal = rwb->wb_background = 0;
+	} else if (rwb->rq_depth.max_depth <= 2) {
+		rwb->wb_normal = rwb->rq_depth.max_depth;
+		rwb->wb_background = 1;
+	} else {
+		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
+		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
+	}
+}
 
-	rwb->scale_step--;
+static void scale_up(struct rq_wb *rwb)
+{
+	rq_depth_scale_up(&rwb->rq_depth);
+	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
-
-	rwb->scaled_max = calc_wb_limits(rwb);
-
-	rwb_wake_all(rwb);
-
-	rwb_trace_step(rwb, "step up");
+	rwb_trace_step(rwb, "scale up");
 }
 
-/*
- * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
- */
 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
 {
-	/*
-	 * Stop scaling down when we've hit the limit. This also prevents
-	 * ->scale_step from going to crazy values, if the device can't
-	 * keep up.
-	 */
-	if (rwb->wb_max == 1)
-		return;
-
-	if (rwb->scale_step < 0 && hard_throttle)
-		rwb->scale_step = 0;
-	else
-		rwb->scale_step++;
-
-	rwb->scaled_max = false;
-	rwb->unknown_cnt = 0;
+	rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
 	calc_wb_limits(rwb);
-	rwb_trace_step(rwb, "step down");
+	rwb->unknown_cnt = 0;
+	rwb_wake_all(rwb);
+	rwb_trace_step(rwb, "scale down");
 }
 
 static void rwb_arm_timer(struct rq_wb *rwb)
 {
-	if (rwb->scale_step > 0) {
+	struct rq_depth *rqd = &rwb->rq_depth;
+
+	if (rqd->scale_step > 0) {
 		/*
 		 * We should speed this up, using some variant of a fast
 		 * integer inverse square root calculation. Since we only do
@@ -418,7 +327,7 @@ static void rwb_arm_timer(struct rq_wb *rwb)
 		 * though.
 		 */
 		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
-					int_sqrt((rwb->scale_step + 1) << 8));
+					int_sqrt((rqd->scale_step + 1) << 8));
 	} else {
 		/*
 		 * For step < 0, we don't want to increase/decrease the
@@ -433,12 +342,13 @@ static void rwb_arm_timer(struct rq_wb *rwb)
 static void wb_timer_fn(struct blk_stat_callback *cb)
 {
 	struct rq_wb *rwb = cb->data;
+	struct rq_depth *rqd = &rwb->rq_depth;
 	unsigned int inflight = wbt_inflight(rwb);
 	int status;
 
 	status = latency_exceeded(rwb, cb->stat);
 
-	trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
+	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
 			inflight);
 
 	/*
@@ -469,9 +379,9 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
 		 * currently don't have a valid read/write sample. For that
 		 * case, slowly return to center state (step == 0).
 		 */
-		if (rwb->scale_step > 0)
+		if (rqd->scale_step > 0)
 			scale_up(rwb);
-		else if (rwb->scale_step < 0)
+		else if (rqd->scale_step < 0)
 			scale_down(rwb, false);
 		break;
 	default:
@@ -481,19 +391,50 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
 	/*
 	 * Re-arm timer, if we have IO in flight
 	 */
-	if (rwb->scale_step || inflight)
+	if (rqd->scale_step || inflight)
 		rwb_arm_timer(rwb);
 }
 
-void wbt_update_limits(struct rq_wb *rwb)
+static void __wbt_update_limits(struct rq_wb *rwb)
 {
-	rwb->scale_step = 0;
-	rwb->scaled_max = false;
+	struct rq_depth *rqd = &rwb->rq_depth;
+
+	rqd->scale_step = 0;
+	rqd->scaled_max = false;
+
+	rq_depth_calc_max_depth(rqd);
 	calc_wb_limits(rwb);
 
 	rwb_wake_all(rwb);
 }
 
+void wbt_update_limits(struct request_queue *q)
+{
+	struct rq_qos *rqos = wbt_rq_qos(q);
+	if (!rqos)
+		return;
+	__wbt_update_limits(RQWB(rqos));
+}
+
+u64 wbt_get_min_lat(struct request_queue *q)
+{
+	struct rq_qos *rqos = wbt_rq_qos(q);
+	if (!rqos)
+		return 0;
+	return RQWB(rqos)->min_lat_nsec;
+}
+
+void wbt_set_min_lat(struct request_queue *q, u64 val)
+{
+	struct rq_qos *rqos = wbt_rq_qos(q);
+	if (!rqos)
+		return;
+	RQWB(rqos)->min_lat_nsec = val;
+	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
+	__wbt_update_limits(RQWB(rqos));
+}
+
+
 static bool close_io(struct rq_wb *rwb)
 {
 	const unsigned long now = jiffies;
@@ -520,7 +461,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 	 * IO for a bit.
 	 */
 	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
-		limit = rwb->wb_max;
+		limit = rwb->rq_depth.max_depth;
 	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
 		/*
 		 * If less than 100ms since we completed unrelated IO,
@@ -554,7 +495,7 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
 	    rqw->wait.head.next != &wait->entry)
 		return false;
 
-	return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
+	return rq_wait_inc_below(rqw, get_limit(rwb, rw));
 }
 
 /*
@@ -614,8 +555,10 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
  * in an irq held spinlock, if it holds one when calling this function.
  * If we do sleep, we'll release and re-grab it.
  */
-enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+static enum wbt_flags wbt_wait(struct rq_qos *rqos, struct bio *bio,
+			       spinlock_t *lock)
 {
+	struct rq_wb *rwb = RQWB(rqos);
 	enum wbt_flags ret = 0;
 
 	if (!rwb_enabled(rwb))
@@ -643,8 +586,10 @@ enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
 	return ret | WBT_TRACKED;
 }
 
-void wbt_issue(struct rq_wb *rwb, struct request *rq)
+void wbt_issue(struct rq_qos *rqos, struct request *rq)
 {
+	struct rq_wb *rwb = RQWB(rqos);
+
 	if (!rwb_enabled(rwb))
 		return;
 
@@ -661,8 +606,9 @@ void wbt_issue(struct rq_wb *rwb, struct request *rq)
 	}
 }
 
-void wbt_requeue(struct rq_wb *rwb, struct request *rq)
+void wbt_requeue(struct rq_qos *rqos, struct request *rq)
 {
+	struct rq_wb *rwb = RQWB(rqos);
 	if (!rwb_enabled(rwb))
 		return;
 	if (rq == rwb->sync_cookie) {
@@ -671,39 +617,30 @@ void wbt_requeue(struct rq_wb *rwb, struct request *rq)
 	}
 }
 
-void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
+void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
 {
-	if (rwb) {
-		rwb->queue_depth = depth;
-		wbt_update_limits(rwb);
+	struct rq_qos *rqos = wbt_rq_qos(q);
+	if (rqos) {
+		RQWB(rqos)->rq_depth.queue_depth = depth;
+		__wbt_update_limits(RQWB(rqos));
 	}
 }
 
-void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
-{
-	if (rwb)
-		rwb->wc = write_cache_on;
-}
-
-/*
- * Disable wbt, if enabled by default.
- */
-void wbt_disable_default(struct request_queue *q)
+void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
 {
-	struct rq_wb *rwb = q->rq_wb;
-
-	if (rwb && rwb->enable_state == WBT_STATE_ON_DEFAULT)
-		wbt_exit(q);
+	struct rq_qos *rqos = wbt_rq_qos(q);
+	if (rqos)
+		RQWB(rqos)->wc = write_cache_on;
 }
-EXPORT_SYMBOL_GPL(wbt_disable_default);
 
 /*
  * Enable wbt if defaults are configured that way
  */
 void wbt_enable_default(struct request_queue *q)
 {
+	struct rq_qos *rqos = wbt_rq_qos(q);
 	/* Throttling already enabled? */
-	if (q->rq_wb)
+	if (rqos)
 		return;
 
 	/* Queue not registered? Maybe shutting down... */
@@ -741,6 +678,41 @@ static int wbt_data_dir(const struct request *rq)
 	return -1;
 }
 
+static void wbt_exit(struct rq_qos *rqos)
+{
+	struct rq_wb *rwb = RQWB(rqos);
+	struct request_queue *q = rqos->q;
+
+	blk_stat_remove_callback(q, rwb->cb);
+	blk_stat_free_callback(rwb->cb);
+	kfree(rwb);
+}
+
+/*
+ * Disable wbt, if enabled by default.
+ */
+void wbt_disable_default(struct request_queue *q)
+{
+	struct rq_qos *rqos = wbt_rq_qos(q);
+	struct rq_wb *rwb;
+	if (!rqos)
+		return;
+	rwb = RQWB(rqos);
+	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
+		rwb->wb_normal = 0;
+}
+EXPORT_SYMBOL_GPL(wbt_disable_default);
+
+
+static struct rq_qos_ops wbt_rqos_ops = {
+	.throttle = wbt_wait,
+	.issue = wbt_issue,
+	.requeue = wbt_requeue,
+	.done = wbt_done,
+	.cleanup = __wbt_done,
+	.exit = wbt_exit,
+};
+
 int wbt_init(struct request_queue *q)
 {
 	struct rq_wb *rwb;
@@ -756,39 +728,29 @@ int wbt_init(struct request_queue *q)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < WBT_NUM_RWQ; i++) {
-		atomic_set(&rwb->rq_wait[i].inflight, 0);
-		init_waitqueue_head(&rwb->rq_wait[i].wait);
-	}
+	for (i = 0; i < WBT_NUM_RWQ; i++)
+		rq_wait_init(&rwb->rq_wait[i]);
 
+	rwb->rqos.id = RQ_QOS_WBT;
+	rwb->rqos.ops = &wbt_rqos_ops;
+	rwb->rqos.q = q;
 	rwb->last_comp = rwb->last_issue = jiffies;
-	rwb->queue = q;
 	rwb->win_nsec = RWB_WINDOW_NSEC;
 	rwb->enable_state = WBT_STATE_ON_DEFAULT;
-	wbt_update_limits(rwb);
+	rwb->wc = 1;
+	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
+	__wbt_update_limits(rwb);
 
 	/*
 	 * Assign rwb and add the stats callback.
 	 */
-	q->rq_wb = rwb;
+	rq_qos_add(q, &rwb->rqos);
 	blk_stat_add_callback(q, rwb->cb);
 
 	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
 
-	wbt_set_queue_depth(rwb, blk_queue_depth(q));
-	wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+	wbt_set_queue_depth(q, blk_queue_depth(q));
+	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
 
 	return 0;
 }
-
-void wbt_exit(struct request_queue *q)
-{
-	struct rq_wb *rwb = q->rq_wb;
-
-	if (rwb) {
-		blk_stat_remove_callback(q, rwb->cb);
-		blk_stat_free_callback(rwb->cb);
-		q->rq_wb = NULL;
-		kfree(rwb);
-	}
-}
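Note: RQWB(), used throughout the new code above, is not defined in this
file. Judging by its use it is the usual container_of() accessor,
presumably declared in blk-wbt.h next to struct rq_wb:

	/* presumed definition, outside this diff: recover the wbt
	 * instance from its embedded rq_qos member */
	static inline struct rq_wb *RQWB(struct rq_qos *rqos)
	{
		return container_of(rqos, struct rq_wb, rqos);
	}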