author		Tejun Heo <tj@kernel.org>	2013-05-14 16:52:35 -0400
committer	Tejun Heo <tj@kernel.org>	2013-05-14 16:52:35 -0400
commit		73f0d49a9637a7ec3448a62a0042e35b14ba18a3 (patch)
tree		e75b1d7c43d29dee8739ec5144d75e83998e3644 /block/blk-throttle.c
parent		49a2f1e3f231f6b2ccfc8192f4c395de7fa910a1 (diff)
blk-throttle: move bio_lists[] and friends to throtl_service_queue
throtl_service_queues will eventually form a tree which is anchored at
throtl_data->service_queue, and queued bios will climb the tree to the
top service_queue to be executed.

This patch moves bio_lists[] and nr_queued[] from throtl_grp to its
service_queue to prepare for that.  As currently only the
throtl_data->service_queue is in use, this patch just ends up moving
throtl_grp->bio_lists[] and ->nr_queued[] to
throtl_grp->service_queue.bio_lists[] and ->nr_queued[] without making
any functional differences.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
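For reference, the net effect on the two data structures looks like this (a simplified sketch assembled from the hunks below; unrelated fields are elided and the kernel's exact field alignment is not reproduced):

	/* Before: per-direction queues live on the group itself. */
	struct throtl_grp {
		struct bio_list bio_lists[2];	/* queued bios [READ/WRITE] */
		unsigned int nr_queued[2];	/* number of queued bios */
		/* ... rate limits, rb_node, etc. ... */
	};

	/* After: they live on the embedded service_queue, so any node of
	 * the planned service_queue tree can hold queued bios, not just
	 * a throtl_grp. */
	struct throtl_service_queue {
		struct bio_list bio_lists[2];	/* queued bios [READ/WRITE] */
		unsigned int nr_queued[2];	/* number of queued bios */
		struct rb_root pending_tree;	/* RB tree of active tgs */
		/* ... */
	};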
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	63
1 file changed, 39 insertions(+), 24 deletions(-)
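Every hunk below funnels through the same four bio_list operations: bio_list_init(), bio_list_add(), bio_list_peek(), and bio_list_pop() (all from include/linux/bio.h). A minimal userspace model of that FIFO discipline may help when following the dispatch paths; the struct layouts here are illustrative stand-ins, not the kernel's:

	#include <stdio.h>
	#include <stddef.h>

	struct bio { struct bio *bi_next; int id; };	/* stand-in */
	struct bio_list { struct bio *head; struct bio *tail; };

	static void bio_list_init(struct bio_list *bl)
	{
		bl->head = bl->tail = NULL;
	}

	/* Enqueue at the tail, as throtl_add_bio_tg() does. */
	static void bio_list_add(struct bio_list *bl, struct bio *bio)
	{
		bio->bi_next = NULL;
		if (bl->tail)
			bl->tail->bi_next = bio;
		else
			bl->head = bio;
		bl->tail = bio;
	}

	/* Look at the head without removing it, as tg_may_dispatch()
	 * and tg_update_disptime() do. */
	static struct bio *bio_list_peek(struct bio_list *bl)
	{
		return bl->head;
	}

	/* Dequeue from the head, as tg_dispatch_one_bio() does. */
	static struct bio *bio_list_pop(struct bio_list *bl)
	{
		struct bio *bio = bl->head;

		if (bio) {
			bl->head = bio->bi_next;
			if (!bl->head)
				bl->tail = NULL;
			bio->bi_next = NULL;
		}
		return bio;
	}

	int main(void)
	{
		struct bio_list bl;
		struct bio a = { NULL, 1 }, b = { NULL, 2 };

		bio_list_init(&bl);
		bio_list_add(&bl, &a);
		bio_list_add(&bl, &b);

		printf("peek: %d\n", bio_list_peek(&bl)->id);	/* 1: FIFO head */
		printf("pop:  %d\n", bio_list_pop(&bl)->id);	/* 1 */
		printf("pop:  %d\n", bio_list_pop(&bl)->id);	/* 2 */
		return 0;
	}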
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 7340440ccfb5..6f57f94c3c57 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -27,6 +27,17 @@ static struct blkcg_policy blkcg_policy_throtl;
 static struct workqueue_struct *kthrotld_workqueue;
 
 struct throtl_service_queue {
+	/*
+	 * Bios queued directly to this service_queue or dispatched from
+	 * children throtl_grp's.
+	 */
+	struct bio_list		bio_lists[2];	/* queued bios [READ/WRITE] */
+	unsigned int		nr_queued[2];	/* number of queued bios */
+
+	/*
+	 * RB tree of active children throtl_grp's, which are sorted by
+	 * their ->disptime.
+	 */
 	struct rb_root		pending_tree;	/* RB tree of active tgs */
 	struct rb_node		*first_pending;	/* first node in the tree */
 	unsigned int		nr_pending;	/* # queued in the tree */
@@ -69,12 +80,6 @@ struct throtl_grp {
 
 	unsigned int flags;
 
-	/* Two lists for READ and WRITE */
-	struct bio_list bio_lists[2];
-
-	/* Number of queued bios on READ and WRITE lists */
-	unsigned int nr_queued[2];
-
 	/* bytes per second rate limits */
 	uint64_t bps[2];
 
@@ -193,6 +198,8 @@ alloc_stats:
 /* init a service_queue, assumes the caller zeroed it */
 static void throtl_service_queue_init(struct throtl_service_queue *sq)
 {
+	bio_list_init(&sq->bio_lists[0]);
+	bio_list_init(&sq->bio_lists[1]);
 	sq->pending_tree = RB_ROOT;
 }
 
@@ -204,8 +211,6 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	throtl_service_queue_init(&tg->service_queue);
 	RB_CLEAR_NODE(&tg->rb_node);
 	tg->td = blkg->q->td;
-	bio_list_init(&tg->bio_lists[0]);
-	bio_list_init(&tg->bio_lists[1]);
 
 	tg->bps[READ] = -1;
 	tg->bps[WRITE] = -1;
@@ -624,7 +629,8 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 	 * this function with a different bio if there are other bios
 	 * queued.
 	 */
-	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
+	BUG_ON(tg->service_queue.nr_queued[rw] &&
+	       bio != bio_list_peek(&tg->service_queue.bio_lists[rw]));
 
 	/* If tg->bps = -1, then BW is unlimited */
 	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
@@ -703,12 +709,13 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg,
 			      struct throtl_service_queue *parent_sq)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	bool rw = bio_data_dir(bio);
 
-	bio_list_add(&tg->bio_lists[rw], bio);
+	bio_list_add(&sq->bio_lists[rw], bio);
 	/* Take a bio reference on tg */
 	blkg_get(tg_to_blkg(tg));
-	tg->nr_queued[rw]++;
+	sq->nr_queued[rw]++;
 	tg->td->nr_queued[rw]++;
 	throtl_enqueue_tg(tg, parent_sq);
 }
@@ -716,13 +723,14 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg,
 static void tg_update_disptime(struct throtl_grp *tg,
 			       struct throtl_service_queue *parent_sq)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
 	struct bio *bio;
 
-	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
+	if ((bio = bio_list_peek(&sq->bio_lists[READ])))
 		tg_may_dispatch(tg, bio, &read_wait);
 
-	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+	if ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
 		tg_may_dispatch(tg, bio, &write_wait);
 
 	min_wait = min(read_wait, write_wait);
@@ -737,10 +745,11 @@ static void tg_update_disptime(struct throtl_grp *tg,
 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
 				struct bio_list *bl)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	struct bio *bio;
 
-	bio = bio_list_pop(&tg->bio_lists[rw]);
-	tg->nr_queued[rw]--;
+	bio = bio_list_pop(&sq->bio_lists[rw]);
+	sq->nr_queued[rw]--;
 	/* Drop bio reference on blkg */
 	blkg_put(tg_to_blkg(tg));
 
@@ -756,6 +765,7 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
 
 static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	unsigned int nr_reads = 0, nr_writes = 0;
 	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
 	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
@@ -763,7 +773,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
 
 	/* Try to dispatch 75% READS and 25% WRITES */
 
-	while ((bio = bio_list_peek(&tg->bio_lists[READ])) &&
+	while ((bio = bio_list_peek(&sq->bio_lists[READ])) &&
 	       tg_may_dispatch(tg, bio, NULL)) {
 
 		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
@@ -773,7 +783,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
 			break;
 	}
 
-	while ((bio = bio_list_peek(&tg->bio_lists[WRITE])) &&
+	while ((bio = bio_list_peek(&sq->bio_lists[WRITE])) &&
 	       tg_may_dispatch(tg, bio, NULL)) {
 
 		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
@@ -790,10 +800,10 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq,
 				  struct bio_list *bl)
 {
 	unsigned int nr_disp = 0;
-	struct throtl_grp *tg;
 
 	while (1) {
-		tg = throtl_rb_first(parent_sq);
+		struct throtl_grp *tg = throtl_rb_first(parent_sq);
+		struct throtl_service_queue *sq = &tg->service_queue;
 
 		if (!tg)
 			break;
@@ -805,7 +815,7 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq,
 
 		nr_disp += throtl_dispatch_tg(tg, bl);
 
-		if (tg->nr_queued[0] || tg->nr_queued[1])
+		if (sq->nr_queued[0] || sq->nr_queued[1])
 			tg_update_disptime(tg, parent_sq);
 
 		if (nr_disp >= throtl_quantum)
@@ -1043,6 +1053,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg;
+	struct throtl_service_queue *sq;
 	bool rw = bio_data_dir(bio), update_disptime = true;
 	struct blkcg *blkcg;
 	bool throttled = false;
@@ -1077,7 +1088,9 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	if (unlikely(!tg))
 		goto out_unlock;
 
-	if (tg->nr_queued[rw]) {
+	sq = &tg->service_queue;
+
+	if (sq->nr_queued[rw]) {
 		/*
 		 * There is already another bio queued in same dir. No
 		 * need to update dispatch time.
@@ -1112,7 +1125,7 @@ queue_bio:
 			rw == READ ? 'R' : 'W',
 			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
 			tg->io_disp[rw], tg->iops[rw],
-			tg->nr_queued[READ], tg->nr_queued[WRITE]);
+			sq->nr_queued[READ], sq->nr_queued[WRITE]);
 
 	bio_associate_current(bio);
 	throtl_add_bio_tg(bio, tg, &q->td->service_queue);
@@ -1151,11 +1164,13 @@ void blk_throtl_drain(struct request_queue *q)
 	bio_list_init(&bl);
 
 	while ((tg = throtl_rb_first(parent_sq))) {
+		struct throtl_service_queue *sq = &tg->service_queue;
+
 		throtl_dequeue_tg(tg, parent_sq);
 
-		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
+		while ((bio = bio_list_peek(&sq->bio_lists[READ])))
 			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
-		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+		while ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
 			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
 	}
 	spin_unlock_irq(q->queue_lock);
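A closing note on the two-element arrays this patch touches (bio_lists[2], nr_queued[2], bps[2]): blk-throttle indexes all per-direction state by bio_data_dir(bio), with READ == 0 and WRITE == 1, which is why the patch can use a `bool rw` directly as an array index. A small self-contained illustration of that convention (stand-in types; not the kernel's definitions):

	#include <stdbool.h>
	#include <stdio.h>

	enum { READ = 0, WRITE = 1 };	/* same values the kernel uses */

	struct bio { bool is_write; };	/* stand-in; the real struct bio differs */

	/* Stand-in for the kernel's bio_data_dir(): returns READ or WRITE. */
	static int bio_data_dir(const struct bio *bio)
	{
		return bio->is_write ? WRITE : READ;
	}

	int main(void)
	{
		unsigned int nr_queued[2] = { 0, 0 };	/* indexed [READ]/[WRITE] */
		struct bio r = { false }, w = { true };

		/* A bool doubles as the index, as in throtl_add_bio_tg(). */
		bool rw = bio_data_dir(&w);
		nr_queued[rw]++;
		nr_queued[bio_data_dir(&r)]++;

		printf("queued: R=%u W=%u\n", nr_queued[READ], nr_queued[WRITE]);
		return 0;
	}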