 block/blk-core.c     | 57 ++++++++++++++++++++++++++++++++++++++++---------------------
 block/blk-sysfs.c    |  1 +
 block/blk-throttle.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++++------
 block/blk.h          |  6 +++++-
 block/elevator.c     |  2 +-
 5 files changed, 87 insertions(+), 29 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 034cbb2024f0..7e1523521c70 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -349,11 +349,13 @@ EXPORT_SYMBOL(blk_put_queue);
 /**
  * blk_drain_queue - drain requests from request_queue
  * @q: queue to drain
+ * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
  *
- * Drain ELV_PRIV requests from @q. The caller is responsible for ensuring
- * that no new requests which need to be drained are queued.
+ * Drain requests from @q. If @drain_all is set, all requests are drained.
+ * If not, only ELVPRIV requests are drained. The caller is responsible
+ * for ensuring that no new requests which need to be drained are queued.
  */
-void blk_drain_queue(struct request_queue *q)
+void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
         while (true) {
                 int nr_rqs;
@@ -361,9 +363,15 @@ void blk_drain_queue(struct request_queue *q)
                 spin_lock_irq(q->queue_lock);
 
                 elv_drain_elevator(q);
+                if (drain_all)
+                        blk_throtl_drain(q);
 
                 __blk_run_queue(q);
-                nr_rqs = q->rq.elvpriv;
+
+                if (drain_all)
+                        nr_rqs = q->rq.count[0] + q->rq.count[1];
+                else
+                        nr_rqs = q->rq.elvpriv;
 
                 spin_unlock_irq(q->queue_lock);
 
@@ -373,30 +381,40 @@ void blk_drain_queue(struct request_queue *q)
         }
 }
 
-/*
- * Note: If a driver supplied the queue lock, it is disconnected
- * by this function. The actual state of the lock doesn't matter
- * here as the request_queue isn't accessible after this point
- * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
+/**
+ * blk_cleanup_queue - shutdown a request queue
+ * @q: request queue to shutdown
+ *
+ * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
-        /*
-         * We know we have process context here, so we can be a little
-         * cautious and ensure that pending block actions on this device
-         * are done before moving on. Going into this function, we should
-         * not have processes doing IO to this device.
-         */
-        blk_sync_queue(q);
+        spinlock_t *lock = q->queue_lock;
 
-        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+        /* mark @q DEAD, no new request or merges will be allowed afterwards */
         mutex_lock(&q->sysfs_lock);
         queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-        mutex_unlock(&q->sysfs_lock);
+
+        spin_lock_irq(lock);
+        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+        queue_flag_set(QUEUE_FLAG_DEAD, q);
 
         if (q->queue_lock != &q->__queue_lock)
                 q->queue_lock = &q->__queue_lock;
 
+        spin_unlock_irq(lock);
+        mutex_unlock(&q->sysfs_lock);
+
+        /* drain all requests queued before DEAD marking */
+        blk_drain_queue(q, true);
+
+        /* @q won't process any more request, flush async actions */
+        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+        blk_sync_queue(q);
+
+        /* @q is and will stay empty, shutdown and put */
         blk_put_queue(q);
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
@@ -1509,9 +1527,6 @@ generic_make_request_checks(struct bio *bio)
                 goto end_io;
         }
 
-        if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-                goto end_io;
-
         part = bio->bi_bdev->bd_part;
         if (should_fail_request(part, bio->bi_size) ||
             should_fail_request(&part_to_disk(part)->part0,
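
Taken together, the blk-core.c hunks invert the old shutdown order: blk_cleanup_queue() now marks the queue DEAD (and disables merges) before draining, so no new request can slip in while blk_drain_queue(q, true) empties it, and only afterwards flushes async actions and drops the final reference. The QUEUE_FLAG_DEAD test removed from generic_make_request_checks() here reappears in throtl_get_tg() in the blk-throttle.c hunk further down. What follows is a toy user-space C sketch of that mark-dead-then-drain ordering; every name in it (toy_queue, toy_submit, toy_cleanup) is invented for illustration and is not kernel API:

/*
 * Toy sketch of the new shutdown order: mark dead first so producers
 * fail fast, then drain to empty, then tear down.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
        bool dead;              /* stands in for QUEUE_FLAG_DEAD */
        int in_flight;          /* stands in for q->rq.count[0] + count[1] */
};

static int toy_submit(struct toy_queue *q)
{
        if (q->dead)
                return -1;      /* new work refused once dead, cf. -ENODEV */
        q->in_flight++;
        return 0;
}

static void toy_cleanup(struct toy_queue *q)
{
        q->dead = true;                 /* 1: no new requests from here on */
        while (q->in_flight > 0)        /* 2: drain everything queued */
                q->in_flight--;         /* stand-in for running the queue */
        printf("queue empty, safe to release\n");       /* 3: put/free */
}

int main(void)
{
        struct toy_queue q = { .dead = false, .in_flight = 3 };

        toy_submit(&q);                 /* accepted while alive */
        toy_cleanup(&q);
        if (toy_submit(&q) != 0)        /* refused after cleanup */
                printf("late submission rejected\n");
        return 0;
}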
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index a8eff5f8b9c5..e7f9f657f105 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -490,6 +490,7 @@ static void blk_release_queue(struct kobject *kobj)
         if (q->queue_tags)
                 __blk_queue_free_tags(q);
 
+        blk_throtl_release(q);
         blk_trace_shutdown(q);
 
         bdi_destroy(&q->backing_dev_info);
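
Calling blk_throtl_release() from blk_release_queue() (the kobject release path) rather than from cleanup splits teardown into two phases: quiesce while other reference holders may still exist, free only when the last reference is gone. A minimal user-space sketch of that two-phase pattern; the refcounting and all names are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

/* Two-phase teardown: quiesce at cleanup time, free only at last put. */
struct obj {
        int refcount;
        char *payload;          /* stands in for q->td */
};

static void obj_cleanup(struct obj *o)
{
        /* stop activity; memory stays valid for remaining ref holders */
        printf("quiesced, %d refs still outstanding\n", o->refcount);
}

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0) {
                free(o->payload);       /* cf. blk_throtl_release() */
                free(o);
        }
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        o->refcount = 2;        /* owner plus one outstanding user */
        o->payload = malloc(16);

        obj_cleanup(o);         /* cf. blk_cleanup_queue()/blk_throtl_exit() */
        obj_put(o);             /* owner's ref; payload must survive ... */
        obj_put(o);             /* ... until the last user drops theirs */
        return 0;
}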
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 900a0c98745b..8edb9499b509 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -309,6 +309,10 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
         struct blkio_cgroup *blkcg;
         struct request_queue *q = td->queue;
 
+        /* no throttling for dead queue */
+        if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+                return NULL;
+
         rcu_read_lock();
         blkcg = task_blkio_cgroup(current);
         tg = throtl_find_tg(td, blkcg);
@@ -1001,11 +1005,6 @@ static void throtl_release_tgs(struct throtl_data *td)
         }
 }
 
-static void throtl_td_free(struct throtl_data *td)
-{
-        kfree(td);
-}
-
 /*
  * Blk cgroup controller notification saying that blkio_group object is being
  * delinked as associated cgroup object is going away. That also means that
@@ -1204,6 +1203,41 @@ out:
         return throttled;
 }
 
+/**
+ * blk_throtl_drain - drain throttled bios
+ * @q: request_queue to drain throttled bios for
+ *
+ * Dispatch all currently throttled bios on @q through ->make_request_fn().
+ */
+void blk_throtl_drain(struct request_queue *q)
+        __releases(q->queue_lock) __acquires(q->queue_lock)
+{
+        struct throtl_data *td = q->td;
+        struct throtl_rb_root *st = &td->tg_service_tree;
+        struct throtl_grp *tg;
+        struct bio_list bl;
+        struct bio *bio;
+
+        WARN_ON_ONCE(!queue_is_locked(q));
+
+        bio_list_init(&bl);
+
+        while ((tg = throtl_rb_first(st))) {
+                throtl_dequeue_tg(td, tg);
+
+                while ((bio = bio_list_peek(&tg->bio_lists[READ])))
+                        tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+                while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+                        tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+        }
+        spin_unlock_irq(q->queue_lock);
+
+        while ((bio = bio_list_pop(&bl)))
+                generic_make_request(bio);
+
+        spin_lock_irq(q->queue_lock);
+}
+
 int blk_throtl_init(struct request_queue *q)
 {
         struct throtl_data *td;
@@ -1276,7 +1310,11 @@ void blk_throtl_exit(struct request_queue *q)
          * it.
          */
         throtl_shutdown_wq(q);
-        throtl_td_free(td);
+}
+
+void blk_throtl_release(struct request_queue *q)
+{
+        kfree(q->td);
 }
 
 static int __init throtl_init(void)
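
Inside blk_throtl_drain() above, throttled bios are first moved onto an on-stack bio_list while queue_lock is held, and only after the lock is dropped are they resubmitted via generic_make_request(), which may recurse into code that takes the lock itself. A compact pthread-based sketch of the same collect-under-lock, dispatch-outside-lock pattern; the node type and the drain()/pending names are invented, not bio_list API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;    /* guarded by lock, cf. tg->bio_lists[] */

static void drain(void)
{
        struct node *local, *n;

        /* phase 1: splice everything onto a private list under the lock */
        pthread_mutex_lock(&lock);
        local = pending;
        pending = NULL;
        pthread_mutex_unlock(&lock);

        /* phase 2: dispatch outside the lock, cf. generic_make_request() */
        while ((n = local)) {
                local = n->next;
                printf("dispatching %d\n", n->id);
                free(n);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->id = i;
                n->next = pending;      /* single-threaded setup, no lock */
                pending = n;
        }
        drain();
        return 0;
}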
diff --git a/block/blk.h b/block/blk.h
index c018dba4e335..3f6551b3c92d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -15,7 +15,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                         struct bio *bio);
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                       struct bio *bio);
-void blk_drain_queue(struct request_queue *q);
+void blk_drain_queue(struct request_queue *q, bool drain_all);
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
@@ -191,15 +191,19 @@ static inline int blk_do_io_stat(struct request *rq)
 
 #ifdef CONFIG_BLK_DEV_THROTTLING
 extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
+extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
+extern void blk_throtl_release(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
         return false;
 }
+static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
+static inline void blk_throtl_release(struct request_queue *q) { }
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
 #endif /* BLK_INTERNAL_H */
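
The blk.h hunk extends the usual Kconfig stub idiom: real prototypes when CONFIG_BLK_DEV_THROTTLING is enabled, empty static inlines otherwise, so callers such as blk_drain_queue() compile unchanged either way. The same idiom in miniature; FEATURE_X and feature_x_drain() are made-up names for illustration:

#include <stdio.h>

#ifdef FEATURE_X
/* "real" implementation; in the kernel this lives in its own .c file */
static void feature_x_drain(int q)
{
        printf("draining feature X state for queue %d\n", q);
}
#else
/* stub compiles away entirely; callers stay #ifdef-free */
static inline void feature_x_drain(int q) { (void)q; }
#endif

int main(void)
{
        feature_x_drain(0);     /* legal whether or not FEATURE_X is set */
        return 0;
}

Build either way, e.g. cc demo.c or cc -DFEATURE_X demo.c.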
diff --git a/block/elevator.c b/block/elevator.c
index 74a277ffed39..66343d6917d0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -626,7 +626,7 @@ void elv_quiesce_start(struct request_queue *q)
         queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
         spin_unlock_irq(q->queue_lock);
 
-        blk_drain_queue(q);
+        blk_drain_queue(q, false);
 }
 
 void elv_quiesce_end(struct request_queue *q)
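
The two callers of blk_drain_queue() now request different drain depths: elv_quiesce_start() passes false because an elevator switch only needs ELVPRIV requests (those holding elevator-private data) to finish, while blk_cleanup_queue() passes true to wait for every request, throttled bios included. A toy C illustration of how such a flag selects which counter must reach zero; the counters and names are invented:

#include <stdbool.h>
#include <stdio.h>

/* counters standing in for q->rq.elvpriv and q->rq.count[0] + count[1] */
static int elvpriv_rqs = 2;
static int total_rqs = 5;       /* includes the elvpriv ones */

static void toy_drain_queue(bool drain_all)
{
        /* which population must hit zero before draining is "done" */
        int *watched = drain_all ? &total_rqs : &elvpriv_rqs;

        while (*watched > 0) {  /* cf. the nr_rqs loop in blk_drain_queue */
                if (elvpriv_rqs > 0)
                        elvpriv_rqs--;
                total_rqs--;
        }
        printf("drained (%s): elvpriv=%d total=%d\n",
               drain_all ? "all" : "elvpriv only", elvpriv_rqs, total_rqs);
}

int main(void)
{
        toy_drain_queue(false); /* cf. elv_quiesce_start() */
        toy_drain_queue(true);  /* cf. blk_cleanup_queue() */
        return 0;
}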