author		Tejun Heo <tj@kernel.org>	2013-05-14 16:52:32 -0400
committer	Tejun Heo <tj@kernel.org>	2013-05-14 16:52:32 -0400
commit		0f3457f60edc57332bf6564fa00d561a4372dcb9 (patch)
tree		bcfd3cc85b2acc0df06f7759b58fcf49110d3264 /block/blk-throttle.c
parent		5b2c16aae0c074c3bb546c4c066ca7064684553c (diff)
blk-throttle: add backlink pointer from throtl_grp to throtl_data
Add throtl_grp->td so that the td (throtl_data) a given tg
(throtl_grp) belongs to can be determined, and remove @td argument
from functions which take both @td and @tg as the former now can be
determined from the latter.
This generally simplifies the code and removes a number of cases where
@td is passed as an argument without being actually used. This will
also help hierarchy support implementation.
While at it, in multi-line conditions, move the logical operators that
begin continuation lines to the end of the previous line.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
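
To see the shape of the refactoring at a glance, here is a minimal,
self-contained C sketch of the calling-convention change. The structs are
heavily abbreviated stand-ins (the real ones live in block/blk-throttle.c)
and the slice_start_* helper names are illustrative only, not kernel code:

#include <stdio.h>

/* Abbreviated stand-ins for the kernel structs; sketch only. */
struct throtl_data {
	unsigned int nr_queued[2];	/* queued bios, per direction */
};

struct throtl_grp {
	struct throtl_data *td;		/* the backlink this patch adds */
	unsigned long bytes_disp[2];
};

/* Before: every helper had to be handed both @td and @tg. */
static void slice_start_old(struct throtl_data *td, struct throtl_grp *tg,
			    int rw)
{
	tg->bytes_disp[rw] = 0;
	td->nr_queued[rw] = 0;
}

/* After: @td is recovered from the group itself, so it drops out of the
 * signature, as throtl_start_new_slice() does in the patch below. */
static void slice_start_new(struct throtl_grp *tg, int rw)
{
	tg->bytes_disp[rw] = 0;
	tg->td->nr_queued[rw] = 0;	/* backlink replaces the argument */
}

int main(void)
{
	struct throtl_data td = { .nr_queued = { 3, 1 } };
	struct throtl_grp tg = { .td = &td };

	slice_start_old(&td, &tg, 0);
	slice_start_new(&tg, 1);
	printf("queued: %u/%u\n", td.nr_queued[0], td.nr_queued[1]);
	return 0;
}

The same backlink is what lets throtl_log_tg() reach the request queue as
(tg)->td->queue instead of taking @td, which is how the macro below loses
its first parameter.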
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	106
1 file changed, 53 insertions(+), 53 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e8ef43d3fab3..a489391f9153 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -57,6 +57,9 @@ struct throtl_grp {
 	/* active throtl group service_queue member */
 	struct rb_node rb_node;
 
+	/* throtl_data this group belongs to */
+	struct throtl_data *td;
+
 	/*
 	 * Dispatch time in jiffies. This is the estimated time when group
 	 * will unthrottle and is ready to dispatch more bio. It is used as
@@ -140,11 +143,11 @@ static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
 	return blkg_to_tg(td->queue->root_blkg);
 }
 
-#define throtl_log_tg(td, tg, fmt, args...) do {			\
+#define throtl_log_tg(tg, fmt, args...) do {				\
 	char __pbuf[128];						\
 									\
 	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
-	blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
+	blk_add_trace_msg((tg)->td->queue, "throtl %s " fmt, __pbuf, ##args); \
 } while (0)
 
 #define throtl_log(td, fmt, args...)	\
@@ -193,6 +196,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	unsigned long flags;
 
 	RB_CLEAR_NODE(&tg->rb_node);
+	tg->td = blkg->q->td;
 	bio_list_init(&tg->bio_lists[0]);
 	bio_list_init(&tg->bio_lists[1]);
 
@@ -401,36 +405,34 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
 	throtl_schedule_delayed_work(td, sq->first_pending_disptime - jiffies);
 }
 
-static inline void
-throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
 {
 	tg->bytes_disp[rw] = 0;
 	tg->io_disp[rw] = 0;
 	tg->slice_start[rw] = jiffies;
 	tg->slice_end[rw] = jiffies + throtl_slice;
-	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
+	throtl_log_tg(tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
 		      rw == READ ? 'R' : 'W', tg->slice_start[rw],
 		      tg->slice_end[rw], jiffies);
 }
 
-static inline void throtl_set_slice_end(struct throtl_data *td,
-		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
+					unsigned long jiffy_end)
 {
 	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 }
 
-static inline void throtl_extend_slice(struct throtl_data *td,
-		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
+				       unsigned long jiffy_end)
 {
 	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
-	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
+	throtl_log_tg(tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
 		      rw == READ ? 'R' : 'W', tg->slice_start[rw],
 		      tg->slice_end[rw], jiffies);
 }
 
 /* Determine if previously allocated or extended slice is complete or not */
-static bool
-throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
 {
 	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
 		return 0;
@@ -439,8 +441,7 @@ throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 }
 
 /* Trim the used slices and adjust slice start accordingly */
-static inline void
-throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 {
 	unsigned long nr_slices, time_elapsed, io_trim;
 	u64 bytes_trim, tmp;
@@ -452,7 +453,7 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 	 * renewed. Don't try to trim the slice if slice is used. A new
 	 * slice will start when appropriate.
 	 */
-	if (throtl_slice_used(td, tg, rw))
+	if (throtl_slice_used(tg, rw))
 		return;
 
 	/*
@@ -463,7 +464,7 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 	 * is bad because it does not allow new slice to start.
 	 */
 
-	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
+	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);
 
 	time_elapsed = jiffies - tg->slice_start[rw];
 
@@ -492,14 +493,14 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 
 	tg->slice_start[rw] += nr_slices * throtl_slice;
 
-	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
+	throtl_log_tg(tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
 			" start=%lu end=%lu jiffies=%lu",
 			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
 			tg->slice_start[rw], tg->slice_end[rw], jiffies);
 }
 
-static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
-		struct bio *bio, unsigned long *wait)
+static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
+				  unsigned long *wait)
 {
 	bool rw = bio_data_dir(bio);
 	unsigned int io_allowed;
@@ -548,8 +549,8 @@ static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
 	return 0;
 }
 
-static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
-		struct bio *bio, unsigned long *wait)
+static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
+				 unsigned long *wait)
 {
 	bool rw = bio_data_dir(bio);
 	u64 bytes_allowed, extra_bytes, tmp;
@@ -600,8 +601,8 @@ static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
  * Returns whether one can dispatch a bio or not. Also returns approx number
  * of jiffies to wait before this bio is with-in IO rate and can be dispatched
  */
-static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
-			    struct bio *bio, unsigned long *wait)
+static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
+			    unsigned long *wait)
 {
 	bool rw = bio_data_dir(bio);
 	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
@@ -626,15 +627,15 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 	 * existing slice to make sure it is at least throtl_slice interval
 	 * long since now.
 	 */
-	if (throtl_slice_used(td, tg, rw))
-		throtl_start_new_slice(td, tg, rw);
+	if (throtl_slice_used(tg, rw))
+		throtl_start_new_slice(tg, rw);
 	else {
 		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
-			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
+			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
 	}
 
-	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
-	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
+	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
+	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
 		if (wait)
 			*wait = 0;
 		return 1;
@@ -646,7 +647,7 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 		*wait = max_wait;
 
 	if (time_before(tg->slice_end[rw], jiffies + max_wait))
-		throtl_extend_slice(td, tg, rw, jiffies + max_wait);
+		throtl_extend_slice(tg, rw, jiffies + max_wait);
 
 	return 0;
 }
@@ -707,10 +708,10 @@ static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
 	struct bio *bio;
 
 	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
-		tg_may_dispatch(td, tg, bio, &read_wait);
+		tg_may_dispatch(tg, bio, &read_wait);
 
 	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
-		tg_may_dispatch(td, tg, bio, &write_wait);
+		tg_may_dispatch(tg, bio, &write_wait);
 
 	min_wait = min(read_wait, write_wait);
 	disptime = jiffies + min_wait;
@@ -721,8 +722,8 @@ static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
 	throtl_enqueue_tg(td, tg);
 }
 
-static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
-				bool rw, struct bio_list *bl)
+static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
+				struct bio_list *bl)
 {
 	struct bio *bio;
 
@@ -731,18 +732,17 @@ static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
 	/* Drop bio reference on blkg */
 	blkg_put(tg_to_blkg(tg));
 
-	BUG_ON(td->nr_queued[rw] <= 0);
-	td->nr_queued[rw]--;
+	BUG_ON(tg->td->nr_queued[rw] <= 0);
+	tg->td->nr_queued[rw]--;
 
 	throtl_charge_bio(tg, bio);
 	bio_list_add(bl, bio);
 	bio->bi_rw |= REQ_THROTTLED;
 
-	throtl_trim_slice(td, tg, rw);
+	throtl_trim_slice(tg, rw);
 }
 
-static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
-			      struct bio_list *bl)
+static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
 {
 	unsigned int nr_reads = 0, nr_writes = 0;
 	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
@@ -751,20 +751,20 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
 
 	/* Try to dispatch 75% READS and 25% WRITES */
 
-	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
-	       && tg_may_dispatch(td, tg, bio, NULL)) {
+	while ((bio = bio_list_peek(&tg->bio_lists[READ])) &&
+	       tg_may_dispatch(tg, bio, NULL)) {
 
-		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
+		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
 		nr_reads++;
 
 		if (nr_reads >= max_nr_reads)
 			break;
 	}
 
-	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
-	       && tg_may_dispatch(td, tg, bio, NULL)) {
+	while ((bio = bio_list_peek(&tg->bio_lists[WRITE])) &&
+	       tg_may_dispatch(tg, bio, NULL)) {
 
-		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
+		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
 		nr_writes++;
 
 		if (nr_writes >= max_nr_writes)
@@ -791,7 +791,7 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
 
 		throtl_dequeue_tg(td, tg);
 
-		nr_disp += throtl_dispatch_tg(td, tg, bl);
+		nr_disp += throtl_dispatch_tg(tg, bl);
 
 		if (tg->nr_queued[0] || tg->nr_queued[1])
 			tg_update_disptime(td, tg);
@@ -933,7 +933,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	else
 		*(unsigned int *)((void *)tg + cft->private) = ctx.v;
 
-	throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
+	throtl_log_tg(tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
 		      tg->bps[READ], tg->bps[WRITE],
 		      tg->iops[READ], tg->iops[WRITE]);
 
@@ -945,8 +945,8 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	 * that a group's limit are dropped suddenly and we don't want to
 	 * account recently dispatched IO with new low rate.
 	 */
-	throtl_start_new_slice(td, tg, 0);
-	throtl_start_new_slice(td, tg, 1);
+	throtl_start_new_slice(tg, 0);
+	throtl_start_new_slice(tg, 1);
 
 	if (tg->flags & THROTL_TG_PENDING) {
 		tg_update_disptime(td, tg);
@@ -1076,7 +1076,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	}
 
 	/* Bio is with-in rate limit of group */
-	if (tg_may_dispatch(td, tg, bio, NULL)) {
+	if (tg_may_dispatch(tg, bio, NULL)) {
 		throtl_charge_bio(tg, bio);
 
 		/*
@@ -1090,12 +1090,12 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 		 *
 		 * So keep on trimming slice even if bio is not queued.
 		 */
-		throtl_trim_slice(td, tg, rw);
+		throtl_trim_slice(tg, rw);
 		goto out_unlock;
 	}
 
 queue_bio:
-	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
+	throtl_log_tg(tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
 			" iodisp=%u iops=%u queued=%d/%d",
 			rw == READ ? 'R' : 'W',
 			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
@@ -1142,9 +1142,9 @@ void blk_throtl_drain(struct request_queue *q)
 		throtl_dequeue_tg(td, tg);
 
 		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
-			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
 		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
-			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
 	}
 	spin_unlock_irq(q->queue_lock);
 