 block/blk-throttle.c | 53 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 4 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index c29a5a8cc18c..a62be8d0dc1b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -229,6 +229,22 @@ __throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
 	}
 }
 
+/*
+ * Should be called without queue lock held. Here the queue lock will be
+ * taken rarely. It is taken only once during the lifetime of a group,
+ * if need be.
+ */
+static void
+throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
+{
+	if (!tg || tg->blkg.dev)
+		return;
+
+	spin_lock_irq(td->queue->queue_lock);
+	__throtl_tg_fill_dev_details(td, tg);
+	spin_unlock_irq(td->queue->queue_lock);
+}
+
 static void throtl_init_add_tg_lists(struct throtl_data *td,
 			struct throtl_grp *tg, struct blkio_cgroup *blkcg)
 {
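
The wrapper above is cheap on the hot path: it tests tg->blkg.dev without the queue lock and takes the lock at most once per group. The __throtl_tg_fill_dev_details() helper it wraps is outside this hunk; the sketch below is an assumed illustration of what such a helper does, copying the backing device's number into the group's blkg once the driver has attached a device. The field names mirror the surrounding code, but this is a sketch, not the committed implementation.

static inline void
__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Sketch: fill in device details once a device is attached */
	if (tg && !tg->blkg.dev && td->queue->backing_dev_info.dev) {
		struct backing_dev_info *bdi = &td->queue->backing_dev_info;

		tg->blkg.dev = MKDEV(MAJOR(bdi->dev->devt),
				     MINOR(bdi->dev->devt));
	}
}
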
@@ -666,6 +682,12 @@ static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
 	return 0;
 }
 
+static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
+{
+	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
+		return true;
+	return false;
+}
+
 /*
  * Returns whether one can dispatch a bio or not. Also returns approx number
  * of jiffies to wait before this bio is within IO rate and can be dispatched
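
tg_no_rule_group() treats -1 in both the bps and the iops limit for a direction as "no rule": such a group can never delay a bio, so callers may skip throttling for it entirely. The truncated comment above describes tg_may_dispatch(); the fragment below is an assumed illustration of the caller pattern that comment implies (tg_may_dispatch() and throtl_charge_bio() appear elsewhere in this file, but the surrounding logic here is a sketch, not part of this patch):

	unsigned long wait = 0;

	if (tg_may_dispatch(td, tg, bio, &wait)) {
		/* Within both bps and iops limits: charge and dispatch now */
		throtl_charge_bio(tg, bio);
	} else {
		/*
		 * Over a limit: arm the dispatch timer for roughly 'wait'
		 * jiffies and re-examine the bio then.
		 */
	}
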
@@ -730,10 +752,6 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	tg->bytes_disp[rw] += bio->bi_size;
 	tg->io_disp[rw]++;
 
-	/*
-	 * TODO: This will take blkg->stats_lock. Figure out a way
-	 * to avoid this cost.
-	 */
 	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
 }
 
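
Dropping this TODO only makes sense if blkiocg_update_dispatch_stats() no longer needs blkg->stats_lock, presumably because the dispatch stats were moved to per-cpu counters in a companion change. Below is a minimal sketch of that lockless-update idea; demo_stats and demo_update_dispatch_stats() are hypothetical names, while DEFINE_PER_CPU/get_cpu_var/put_cpu_var are the real kernel per-cpu primitives.

#include <linux/percpu.h>

struct demo_stats {
	u64 serviced;		/* number of IOs dispatched */
	u64 service_bytes;	/* bytes dispatched */
};
static DEFINE_PER_CPU(struct demo_stats, demo_stats);

static void demo_update_dispatch_stats(unsigned int bytes)
{
	/* Update this CPU's counters; no shared lock on the hot path */
	struct demo_stats *s = &get_cpu_var(demo_stats);

	s->serviced++;
	s->service_bytes += bytes;
	put_cpu_var(demo_stats);
}
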
@@ -1111,12 +1129,39 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 	struct throtl_grp *tg;
 	struct bio *bio = *biop;
 	bool rw = bio_data_dir(bio), update_disptime = true;
+	struct blkio_cgroup *blkcg;
 
 	if (bio->bi_rw & REQ_THROTTLED) {
 		bio->bi_rw &= ~REQ_THROTTLED;
 		return 0;
 	}
 
+	/*
+	 * A throtl_grp pointer retrieved under rcu can be used to access
+	 * basic fields like stats and io rates. If a group has no rules,
+	 * just update the dispatch stats in a lockless manner and return.
+	 */
+
+	rcu_read_lock();
+	blkcg = task_blkio_cgroup(current);
+	tg = throtl_find_tg(td, blkcg);
+	if (tg) {
+		throtl_tg_fill_dev_details(td, tg);
+
+		if (tg_no_rule_group(tg, rw)) {
+			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
+					rw, bio->bi_rw & REQ_SYNC);
+			rcu_read_unlock();
+			return 0;
+		}
+	}
+	rcu_read_unlock();
+
+	/*
+	 * Either the group has not been allocated yet, or it is not an
+	 * unlimited IO group.
+	 */
+
 	spin_lock_irq(q->queue_lock);
 	tg = throtl_get_tg(td);
 
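
Taken together, this hunk implements a common two-tier pattern: an rcu-protected lockless fast path for groups that cannot throttle anything, with a fall-back to the queue lock (and possible group allocation via throtl_get_tg()) for everything else. Below is a distilled sketch of that pattern under hypothetical names; the extern prototypes stand in for throtl_find_tg(), tg_no_rule_group() and friends, so treat this as an illustration rather than the committed code.

struct grp;	/* stand-in for struct throtl_grp */

extern struct grp *fast_lookup_grp(void);	/* like throtl_find_tg() */
extern bool grp_is_unlimited(struct grp *g);	/* like tg_no_rule_group() */
extern void grp_account_bio(struct grp *g, struct bio *bio);
extern struct grp *slow_get_grp(struct request_queue *q); /* may allocate */

static int demo_throttle_bio(struct request_queue *q, struct bio *bio)
{
	struct grp *g;

	rcu_read_lock();
	g = fast_lookup_grp();			/* never allocates under rcu */
	if (g && grp_is_unlimited(g)) {
		grp_account_bio(g, bio);	/* lockless stat update */
		rcu_read_unlock();
		return 0;			/* dispatch immediately */
	}
	rcu_read_unlock();

	/* Slow path: allocation and throttling need the queue lock */
	spin_lock_irq(q->queue_lock);
	g = slow_get_grp(q);
	/* ... charge the bio, queue it, arm dispatch timers ... */
	spin_unlock_irq(q->queue_lock);
	return 1;
}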