author	Vivek Goyal <vgoyal@redhat.com>	2011-03-02 19:05:33 -0500
committer	Jens Axboe <jaxboe@fusionio.com>	2011-03-02 19:06:49 -0500
commit	da527770007fce8e4541947d47918248286da875
tree	fc518172887409133e8dea827067209adfdb1c3c
parent	cd25f54961273c2e4cbd47441e04832468382a5e
block: Move blk_throtl_exit() call to blk_cleanup_queue()
Move blk_throtl_exit() into blk_cleanup_queue(), as blk_throtl_exit() is
written in such a way that it needs the queue lock. In blk_release_queue()
there is no guarantee that ->queue_lock is still around.
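
To make the lifetime problem concrete, here is a minimal sketch, assuming a
driver that passes its own spinlock to blk_init_queue() so that
q->queue_lock points into driver memory. The mydev type and mydev_remove()
are invented for this illustration; this is not actual kernel source.

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical driver whose spinlock becomes q->queue_lock. */
struct mydev {
	spinlock_t lock;		/* handed to blk_init_queue() */
	struct request_queue *queue;
};

static void mydev_remove(struct mydev *dev)
{
	blk_cleanup_queue(dev->queue);	/* dev->lock is still valid here */
	kfree(dev);			/* frees the spinlock as well */
	/*
	 * blk_release_queue() runs later, when the queue's last kobject
	 * reference drops.  Anything it does that takes q->queue_lock --
	 * such as the old blk_throtl_exit() call site -- would touch
	 * freed memory.
	 */
}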
Initially blk_throtl_exit() was called from blk_cleanup_queue(), but Ingo
reported a problem:

https://lkml.org/lkml/2010/10/23/86

and a quick fix moved blk_throtl_exit() to blk_release_queue():
    commit 7ad58c028652753814054f4e3ac58f925e7343f4
    Author: Jens Axboe <jaxboe@fusionio.com>
    Date:   Sat Oct 23 20:40:26 2010 +0200

        block: fix use-after-free bug in blk throttle code
This patch reverts the above change and does not try to shut down the
throttle work in blk_sync_queue(). By avoiding the call to
throtl_shutdown_timer_wq() from blk_sync_queue(), we should also avoid
the problem reported by Ingo.
blk_sync_queue() seems to be used only by the md driver, which appears to
use it to make sure q->unplug_fn is not called, as md registers its own
unplug functions and is about to free up the data structures used by
unplug_fn(). Block throttle does not call back into unplug_fn() or into
md, so there is no need to cancel blk throttle work.
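
A minimal sketch of that md-style usage (simplified; the real md code
differs, and driver_priv/driver_stop() are invented here):

#include <linux/blkdev.h>
#include <linux/slab.h>

/* State that the driver's unplug_fn dereferences. */
struct driver_priv {
	int whatever;
};

static void driver_stop(struct request_queue *q, struct driver_priv *priv)
{
	blk_sync_queue(q);	/* unplug timer and unplug work are quiesced */
	kfree(priv);		/* safe: unplug_fn() will not run again */
}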
In fact, I think cancelling the block throttle work is bad: it might
happen that some bios are throttled and scheduled to be dispatched later
by the pending work, and if the work is cancelled, those bios might never
be dispatched.
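
The hazard in sketch form -- the names below are invented and this is not
the actual throttle code, but it shows why the pending work is the
throttled bios' only way out:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/workqueue.h>

struct throtl_sketch {
	struct bio_list		queued;		/* bios waiting out their budget */
	struct delayed_work	dispatch_work;	/* the only dispatcher */
};

static void throtl_dispatch_sketch(struct work_struct *work)
{
	struct throtl_sketch *td = container_of(to_delayed_work(work),
						struct throtl_sketch,
						dispatch_work);
	struct bio *bio;

	/* resubmit bios whose dispatch time has arrived */
	while ((bio = bio_list_pop(&td->queued)))
		generic_make_request(bio);
}

/*
 * If blk_sync_queue() called cancel_delayed_work_sync(&td->dispatch_work)
 * while bios were still on td->queued, they would never be resubmitted --
 * nothing else ever pops that list.
 */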
The block layer also uses blk_sync_queue() at blk_cleanup_queue() and
blk_release_queue() time. That should be safe, as we also call
blk_throtl_exit(), which makes sure all the throttling-related data
structures are cleaned up.
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
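
For orientation before the hunks, here is a condensed sketch of the two
teardown paths as they look after this patch (summarised from the hunks
below; not verbatim kernel source):

/* After this patch: throttle teardown happens while the lock is alive. */
void blk_cleanup_queue_sketch(struct request_queue *q)
{
	blk_sync_queue(q);		/* stop timers and unplug work */
	if (q->elevator)
		elevator_exit(q->elevator);
	blk_throtl_exit(q);		/* takes q->queue_lock -- still valid */
	blk_put_queue(q);		/* final release may come much later */
}

void blk_release_queue_sketch(struct request_queue *q)
{
	blk_sync_queue(q);		/* safe: throttle state is already gone */
	/* then free the request_list mempool and the queue itself */
}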
 block/blk-core.c       | 7 ++++++-
 block/blk-sysfs.c      | 2 --
 block/blk-throttle.c   | 6 +++---
 include/linux/blkdev.h | 2 --
 4 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index bc2b7c5004e1..accff29ad674 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -380,13 +380,16 @@ EXPORT_SYMBOL(blk_stop_queue);
  * that its ->make_request_fn will not re-add plugging prior to calling
  * this function.
  *
+ * This function does not cancel any asynchronous activity arising
+ * out of elevator or throttling code. That would require elevator_exit()
+ * and blk_throtl_exit() to be called with queue lock initialized.
+ *
  */
 void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->unplug_timer);
 	del_timer_sync(&q->timeout);
 	cancel_work_sync(&q->unplug_work);
-	throtl_shutdown_timer_wq(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -469,6 +472,8 @@ void blk_cleanup_queue(struct request_queue *q)
 	if (q->elevator)
 		elevator_exit(q->elevator);
 
+	blk_throtl_exit(q);
+
 	blk_put_queue(q);
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 41fb69150b4d..261c75c665ae 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -471,8 +471,6 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_sync_queue(q);
 
-	blk_throtl_exit(q);
-
 	if (rl->rq_pool)
 		mempool_destroy(rl->rq_pool);
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a89043a3caa4..c0f623742165 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -965,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
 	throtl_schedule_delayed_work(td->queue, 0);
 }
 
-void throtl_shutdown_timer_wq(struct request_queue *q)
+static void throtl_shutdown_wq(struct request_queue *q)
 {
 	struct throtl_data *td = q->td;
 
@@ -1099,7 +1099,7 @@ void blk_throtl_exit(struct request_queue *q)
 
 	BUG_ON(!td);
 
-	throtl_shutdown_timer_wq(q);
+	throtl_shutdown_wq(q);
 
 	spin_lock_irq(q->queue_lock);
 	throtl_release_tgs(td);
@@ -1129,7 +1129,7 @@ void blk_throtl_exit(struct request_queue *q)
 	 * update limits through cgroup and another work got queued, cancel
 	 * it.
 	 */
-	throtl_shutdown_timer_wq(q);
+	throtl_shutdown_wq(q);
 	throtl_td_free(td);
 }
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e3ee74fc5903..23fb92506c31 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1144,7 +1144,6 @@ extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
 extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
-extern void throtl_shutdown_timer_wq(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
 {
@@ -1154,7 +1153,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
 static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
-static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \