Diffstat (limited to 'block/blk-throttle.c')

 -rw-r--r--  block/blk-throttle.c | 49
 1 file changed, 17 insertions, 32 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index ecba5fcef201..900a0c98745b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -303,10 +303,6 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	return tg;
 }
 
-/*
- * This function returns with queue lock unlocked in case of error, like
- * request queue is no more
- */
 static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 {
 	struct throtl_grp *tg = NULL, *__tg = NULL;
@@ -330,20 +326,16 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	spin_unlock_irq(q->queue_lock);
 
 	tg = throtl_alloc_tg(td);
-	/*
-	 * We might have slept in group allocation. Make sure queue is not
-	 * dead
-	 */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-		if (tg)
-			kfree(tg);
-
-		return ERR_PTR(-ENODEV);
-	}
 
 	/* Group allocated and queue is still alive. take the lock */
 	spin_lock_irq(q->queue_lock);
 
+	/* Make sure @q is still alive */
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+		kfree(tg);
+		return NULL;
+	}
+
 	/*
 	 * Initialize the new group. After sleeping, read the blkcg again.
 	 */
@@ -1118,17 +1110,17 @@ static struct blkio_policy_type blkio_policy_throtl = {
 	.plid = BLKIO_POLICY_THROTL,
 };
 
-int blk_throtl_bio(struct request_queue *q, struct bio **biop)
+bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg;
-	struct bio *bio = *biop;
 	bool rw = bio_data_dir(bio), update_disptime = true;
 	struct blkio_cgroup *blkcg;
+	bool throttled = false;
 
 	if (bio->bi_rw & REQ_THROTTLED) {
 		bio->bi_rw &= ~REQ_THROTTLED;
-		return 0;
+		goto out;
 	}
 
 	/*
@@ -1147,7 +1139,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
 					rw, rw_is_sync(bio->bi_rw));
 			rcu_read_unlock();
-			return 0;
+			goto out;
 		}
 	}
 	rcu_read_unlock();
@@ -1156,18 +1148,10 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 	 * Either group has not been allocated yet or it is not an unlimited
 	 * IO group
 	 */
-
 	spin_lock_irq(q->queue_lock);
 	tg = throtl_get_tg(td);
-
-	if (IS_ERR(tg)) {
-		if (PTR_ERR(tg) == -ENODEV) {
-			/*
-			 * Queue is gone. No queue lock held here.
-			 */
-			return -ENODEV;
-		}
-	}
+	if (unlikely(!tg))
+		goto out_unlock;
 
 	if (tg->nr_queued[rw]) {
 		/*
@@ -1195,7 +1179,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 		 * So keep on trimming slice even if bio is not queued.
 		 */
 		throtl_trim_slice(td, tg, rw);
-		goto out;
+		goto out_unlock;
 	}
 
 queue_bio:
@@ -1207,16 +1191,17 @@ queue_bio:
 			tg->nr_queued[READ], tg->nr_queued[WRITE]);
 
 	throtl_add_bio_tg(q->td, tg, bio);
-	*biop = NULL;
+	throttled = true;
 
 	if (update_disptime) {
 		tg_update_disptime(td, tg);
 		throtl_schedule_next_dispatch(td);
 	}
 
-out:
+out_unlock:
 	spin_unlock_irq(q->queue_lock);
-	return 0;
+out:
+	return throttled;
 }
 
 int blk_throtl_init(struct request_queue *q)
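
A minimal caller sketch for the new interface (illustration only, not part of this patch; the helper name and placement are assumptions): after this change blk_throtl_bio() signals through its bool return value whether it has queued the bio for delayed dispatch, so the submission path no longer needs the struct bio **biop out-parameter and can simply stop handling a bio that the throttler has taken over.

/*
 * Hypothetical block-layer helper, assuming the post-patch prototype
 * bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 * from the block layer's internal blk.h header.
 */
#include "blk.h"

static bool bio_was_throttled(struct request_queue *q, struct bio *bio)
{
	if (blk_throtl_bio(q, bio))
		return true;	/* queued by blk-throttle; dispatched later */

	return false;		/* not throttled; continue normal submission */
}

The move from int plus **biop to bool plus *bio reflects that the throttler never rewrites the caller's bio pointer any more; it only records that it queued the bio (throttled = true) and reports that fact to the caller.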