-rw-r--r--  block/blk-core.c     |  8 ++------
-rw-r--r--  block/blk-throttle.c | 49 +++++++++++++++++--------------------------------
-rw-r--r--  block/blk.h          |  6 +++---
3 files changed, 22 insertions(+), 41 deletions(-)
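
Note: read together, the three diffs below change blk_throtl_bio() from int blk_throtl_bio(struct request_queue *q, struct bio **biop) to bool blk_throtl_bio(struct request_queue *q, struct bio *bio). Instead of signalling "throttled" by NULLing the caller's bio pointer (and returning -ENODEV on a dead queue), the function now returns true when it has queued the bio internally and false when the caller should keep issuing it. throtl_get_tg() is reworked to match: it returns NULL instead of ERR_PTR(-ENODEV) and holds the queue lock on every return path.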
diff --git a/block/blk-core.c b/block/blk-core.c
index 149149dd7f7b..6c491f2388e9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1515,12 +1515,8 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
-	if (blk_throtl_bio(q, &bio))
-		goto end_io;
-
-	/* if bio = NULL, bio has been throttled and will be submitted later. */
-	if (!bio)
-		return false;
+	if (blk_throtl_bio(q, bio))
+		return false;	/* throttled, will be resubmitted later */
 
 	trace_block_bio_queue(q, bio);
 	return true;
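
With the pointer-to-pointer parameter gone, the caller-side contract is simply: true means the throttler took ownership of the bio and will resubmit it later; false means carry on. A minimal sketch of that contract (hypothetical caller, mirroring generic_make_request_checks() above; q and bio are assumed to be the queue and bio under test):

	if (blk_throtl_bio(q, bio))
		return false;	/* bio now owned by the throttler; resubmitted later */

	/* not throttled: continue down the normal submission path */
	trace_block_bio_queue(q, bio);
	return true;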
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index ecba5fcef201..900a0c98745b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -303,10 +303,6 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	return tg;
 }
 
-/*
- * This function returns with queue lock unlocked in case of error, like
- * request queue is no more
- */
 static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 {
 	struct throtl_grp *tg = NULL, *__tg = NULL;
@@ -330,20 +326,16 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	spin_unlock_irq(q->queue_lock);
 
 	tg = throtl_alloc_tg(td);
-	/*
-	 * We might have slept in group allocation. Make sure queue is not
-	 * dead
-	 */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-		if (tg)
-			kfree(tg);
-
-		return ERR_PTR(-ENODEV);
-	}
 
 	/* Group allocated and queue is still alive. take the lock */
 	spin_lock_irq(q->queue_lock);
 
+	/* Make sure @q is still alive */
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+		kfree(tg);
+		return NULL;
+	}
+
 	/*
 	 * Initialize the new group. After sleeping, read the blkcg again.
 	 */
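
The dead-queue check moves below the spin_lock_irq(), so throtl_get_tg() now returns with q->queue_lock held on every path, and the old "returns unlocked on error" special case disappears along with the ERR_PTR(-ENODEV) encoding it required. The if (tg) guard could be dropped because kfree(NULL) is a no-op. Condensed from this hunk, the resulting flow is:

	spin_unlock_irq(q->queue_lock);
	tg = throtl_alloc_tg(td);	/* may sleep */
	spin_lock_irq(q->queue_lock);

	/* the queue may have died while we slept; re-check under the lock */
	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		kfree(tg);		/* kfree(NULL) is a no-op */
		return NULL;
	}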
@@ -1118,17 +1110,17 @@ static struct blkio_policy_type blkio_policy_throtl = {
 	.plid = BLKIO_POLICY_THROTL,
 };
 
-int blk_throtl_bio(struct request_queue *q, struct bio **biop)
+bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg;
-	struct bio *bio = *biop;
 	bool rw = bio_data_dir(bio), update_disptime = true;
 	struct blkio_cgroup *blkcg;
+	bool throttled = false;
 
 	if (bio->bi_rw & REQ_THROTTLED) {
 		bio->bi_rw &= ~REQ_THROTTLED;
-		return 0;
+		goto out;
 	}
 
 	/*
@@ -1147,7 +1139,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
 					rw, rw_is_sync(bio->bi_rw));
 			rcu_read_unlock();
-			return 0;
+			goto out;
 		}
 	}
 	rcu_read_unlock();
@@ -1156,18 +1148,10 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 	 * Either group has not been allocated yet or it is not an unlimited
 	 * IO group
 	 */
-
 	spin_lock_irq(q->queue_lock);
 	tg = throtl_get_tg(td);
-
-	if (IS_ERR(tg)) {
-		if (PTR_ERR(tg) == -ENODEV) {
-			/*
-			 * Queue is gone. No queue lock held here.
-			 */
-			return -ENODEV;
-		}
-	}
+	if (unlikely(!tg))
+		goto out_unlock;
 
 	if (tg->nr_queued[rw]) {
 		/*
@@ -1195,7 +1179,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 		 * So keep on trimming slice even if bio is not queued.
 		 */
 		throtl_trim_slice(td, tg, rw);
-		goto out;
+		goto out_unlock;
 	}
 
 queue_bio:
@@ -1207,16 +1191,17 @@ queue_bio:
 			tg->nr_queued[READ], tg->nr_queued[WRITE]);
 
 	throtl_add_bio_tg(q->td, tg, bio);
-	*biop = NULL;
+	throttled = true;
 
 	if (update_disptime) {
 		tg_update_disptime(td, tg);
 		throtl_schedule_next_dispatch(td);
 	}
 
-out:
+out_unlock:
 	spin_unlock_irq(q->queue_lock);
-	return 0;
+out:
+	return throttled;
 }
 
 int blk_throtl_init(struct request_queue *q)
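
All exits from blk_throtl_bio() now funnel through a throttled flag and two labels: out_unlock drops the queue lock first, while paths that never took the lock (the REQ_THROTTLED fast path and the RCU-protected no-rule path) jump straight to out. Condensed from the hunks above, the exit structure is:

	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;			/* lock was never taken */
	}
	/* ... */
	spin_lock_irq(q->queue_lock);
	/* ... */
	throtl_add_bio_tg(q->td, tg, bio);
	throttled = true;			/* bio handed off to the throttler */
	/* ... */
out_unlock:
	spin_unlock_irq(q->queue_lock);
out:
	return throttled;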
diff --git a/block/blk.h b/block/blk.h
index 2b66dc21a493..c018dba4e335 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -190,13 +190,13 @@ static inline int blk_do_io_stat(struct request *rq)
 }
 
 #ifdef CONFIG_BLK_DEV_THROTTLING
-extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
+extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
-static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
+static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
-	return 0;
+	return false;
 }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
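
With CONFIG_BLK_DEV_THROTTLING=n, the inline stub now reports "never throttled" as false rather than 0, so the call site in blk-core.c compiles identically in both configurations and needs no #ifdef around the blk_throtl_bio() call.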