author      Tejun Heo <tj@kernel.org>    2013-05-14 16:52:35 -0400
committer   Tejun Heo <tj@kernel.org>    2013-05-14 16:52:35 -0400
commit      651930bc1c2a2550fde93a8cfa1a201c363a0ca1
tree        e043b460d7a681d846057a52bd8ed221dfaa1f6e /block
parent      73f0d49a9637a7ec3448a62a0042e35b14ba18a3
blk-throttle: dispatch to throtl_data->service_queue.bio_lists[]
throtl_service_queues will eventually form a tree anchored at
throtl_data->service_queue, and queued bios will climb the tree to the
top-level service_queue to be executed.
This patch makes the dispatch paths in blk_throtl_dispatch_work_fn()
and blk_throtl_drain() dispatch bios to
throtl_data->service_queue.bio_lists[] instead of an on-stack
bio_list, so that the final dispatch to the top-level service_queue
shares the same mechanism as dispatches through the rest of the
hierarchy.
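For reference, the heart of the change is in tg_dispatch_one_bio(), which
now takes the parent service_queue and adds the dispatched bio to its
per-direction list. A condensed excerpt of the hunks below, with unrelated
lines elided:

    static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
                                    struct throtl_service_queue *parent_sq)
    {
            ...
            throtl_charge_bio(tg, bio);
            /* land the bio on the parent service_queue, not an on-stack list */
            bio_list_add(&parent_sq->bio_lists[rw], bio);
            ...
    }

Callers throtl_dispatch_tg() and throtl_select_dispatch() correspondingly
lose their struct bio_list * argument and simply pass parent_sq through.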
As bios should be issued in a sleepable context,
blk_throtl_dispatch_work_fn() transfers all dispatched bios from the
service_queue's bio_lists[] into an on-stack bio_list before dropping
queue_lock and issuing them.
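The transfer in blk_throtl_dispatch_work_fn() looks roughly like this
(condensed from the hunk below; the actual issuing of bio_list_on_stack
after queue_lock is dropped is untouched by this patch):

    spin_lock_irq(q->queue_lock);
    ...
    nr_disp = throtl_select_dispatch(sq);

    if (nr_disp) {
            /* collect what was just dispatched into sq->bio_lists[] */
            for (rw = READ; rw <= WRITE; rw++) {
                    bio_list_merge(&bio_list_on_stack, &sq->bio_lists[rw]);
                    bio_list_init(&sq->bio_lists[rw]);
            }
            throtl_log(td, "bios disp=%u", nr_disp);
    }
    ...
    spin_unlock_irq(q->queue_lock);
    /* bio_list_on_stack is then drained and issued without queue_lock */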
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Diffstat (limited to 'block')
-rw-r--r--   block/blk-throttle.c | 40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 6f57f94c3c57..154bd63719c5 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -743,7 +743,7 @@ static void tg_update_disptime(struct throtl_grp *tg,
 }
 
 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
-				struct bio_list *bl)
+				struct throtl_service_queue *parent_sq)
 {
 	struct throtl_service_queue *sq = &tg->service_queue;
 	struct bio *bio;
@@ -757,13 +757,14 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
 	tg->td->nr_queued[rw]--;
 
 	throtl_charge_bio(tg, bio);
-	bio_list_add(bl, bio);
+	bio_list_add(&parent_sq->bio_lists[rw], bio);
 	bio->bi_rw |= REQ_THROTTLED;
 
 	throtl_trim_slice(tg, rw);
 }
 
-static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
+static int throtl_dispatch_tg(struct throtl_grp *tg,
+			      struct throtl_service_queue *parent_sq)
 {
 	struct throtl_service_queue *sq = &tg->service_queue;
 	unsigned int nr_reads = 0, nr_writes = 0;
@@ -776,7 +777,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
 	while ((bio = bio_list_peek(&sq->bio_lists[READ])) &&
 	       tg_may_dispatch(tg, bio, NULL)) {
 
-		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
+		tg_dispatch_one_bio(tg, bio_data_dir(bio), parent_sq);
 		nr_reads++;
 
 		if (nr_reads >= max_nr_reads)
@@ -786,7 +787,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
 	while ((bio = bio_list_peek(&sq->bio_lists[WRITE])) &&
 	       tg_may_dispatch(tg, bio, NULL)) {
 
-		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
+		tg_dispatch_one_bio(tg, bio_data_dir(bio), parent_sq);
 		nr_writes++;
 
 		if (nr_writes >= max_nr_writes)
@@ -796,8 +797,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
 	return nr_reads + nr_writes;
 }
 
-static int throtl_select_dispatch(struct throtl_service_queue *parent_sq,
-				  struct bio_list *bl)
+static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
 {
 	unsigned int nr_disp = 0;
 
@@ -813,7 +813,7 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq,
 
 		throtl_dequeue_tg(tg, parent_sq);
 
-		nr_disp += throtl_dispatch_tg(tg, bl);
+		nr_disp += throtl_dispatch_tg(tg, parent_sq);
 
 		if (sq->nr_queued[0] || sq->nr_queued[1])
 			tg_update_disptime(tg, parent_sq);
@@ -830,11 +830,13 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
 {
 	struct throtl_data *td = container_of(to_delayed_work(work),
 					      struct throtl_data, dispatch_work);
+	struct throtl_service_queue *sq = &td->service_queue;
 	struct request_queue *q = td->queue;
 	unsigned int nr_disp = 0;
 	struct bio_list bio_list_on_stack;
 	struct bio *bio;
 	struct blk_plug plug;
+	int rw;
 
 	spin_lock_irq(q->queue_lock);
 
@@ -844,10 +846,15 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
 		   td->nr_queued[READ] + td->nr_queued[WRITE],
 		   td->nr_queued[READ], td->nr_queued[WRITE]);
 
-	nr_disp = throtl_select_dispatch(&td->service_queue, &bio_list_on_stack);
+	nr_disp = throtl_select_dispatch(sq);
 
-	if (nr_disp)
+	if (nr_disp) {
+		for (rw = READ; rw <= WRITE; rw++) {
+			bio_list_merge(&bio_list_on_stack, &sq->bio_lists[rw]);
+			bio_list_init(&sq->bio_lists[rw]);
+		}
 		throtl_log(td, "bios disp=%u", nr_disp);
+	}
 
 	throtl_schedule_next_dispatch(td);
 
@@ -1156,27 +1163,26 @@ void blk_throtl_drain(struct request_queue *q)
 	struct throtl_data *td = q->td;
 	struct throtl_service_queue *parent_sq = &td->service_queue;
 	struct throtl_grp *tg;
-	struct bio_list bl;
 	struct bio *bio;
+	int rw;
 
 	queue_lockdep_assert_held(q);
 
-	bio_list_init(&bl);
-
 	while ((tg = throtl_rb_first(parent_sq))) {
 		struct throtl_service_queue *sq = &tg->service_queue;
 
 		throtl_dequeue_tg(tg, parent_sq);
 
 		while ((bio = bio_list_peek(&sq->bio_lists[READ])))
-			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
+			tg_dispatch_one_bio(tg, bio_data_dir(bio), parent_sq);
 		while ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
-			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
+			tg_dispatch_one_bio(tg, bio_data_dir(bio), parent_sq);
 	}
 	spin_unlock_irq(q->queue_lock);
 
-	while ((bio = bio_list_pop(&bl)))
-		generic_make_request(bio);
+	for (rw = READ; rw <= WRITE; rw++)
+		while ((bio = bio_list_pop(&parent_sq->bio_lists[rw])))
+			generic_make_request(bio);
 
 	spin_lock_irq(q->queue_lock);
 }