author     Jens Axboe <jens.axboe@oracle.com>  2009-10-05 02:52:35 -0400
committer  Jens Axboe <jens.axboe@oracle.com>  2009-10-05 05:03:58 -0400
commit     23e018a1b083ecb4b8bb2fb43d58e7c19b5d7959 (patch)
tree       a6361448bd596ccef393778e6f85e29413a01213 /block
parent     48e025e63ac908ed6ec5394a294f4ecd510a7476 (diff)
block: get rid of kblockd_schedule_delayed_work()
It was briefly introduced to allow CFQ to do delayed scheduling,
but we ended up removing that feature again. So let's kill the
function and its export, and just switch CFQ back to the normal
work schedule, since it now passes in a '0' delay from all call
sites.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
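For context, the removed helper was a thin wrapper around the generic
workqueue API, and with every caller passing a delay of 0 it was
equivalent to plain queue_work(). A minimal sketch of the two paths,
based on the workqueue API of this era (illustrative only, not code
carried in the patch below):

#include <linux/blkdev.h>
#include <linux/workqueue.h>

static struct workqueue_struct *kblockd_workqueue; /* created in blk_dev_init() */

/* The surviving helper: queue the work item on kblockd immediately. */
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}

/* The helper removed here only forwarded to queue_delayed_work(); with
 * delay == 0 that queues the work immediately anyway, so a zero delay
 * buys nothing over queue_work() on a plain work_struct. */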
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c    |  8
-rw-r--r--  block/cfq-iosched.c | 24
2 files changed, 11 insertions, 21 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 81f34311659a..73ecbed02605 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2492,14 +2492,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-				  struct delayed_work *work,
-				  unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, work, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ae14cbaf9d0e..690ebd96dc42 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -150,7 +150,7 @@ struct cfq_data {
 	 * idle window management
 	 */
 	struct timer_list idle_slice_timer;
-	struct delayed_work unplug_work;
+	struct work_struct unplug_work;
 
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
@@ -268,13 +268,11 @@ static inline int cfq_bio_sync(struct bio *bio)
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
-					 unsigned long delay)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
 	if (cfqd->busy_queues) {
 		cfq_log(cfqd, "schedule dispatch");
-		kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
-						delay);
+		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 	}
 }
 
@@ -1400,7 +1398,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
 	if (unlikely(cfqd->active_queue == cfqq)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 	}
 
 	kmem_cache_free(cfq_pool, cfqq);
@@ -1495,7 +1493,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	if (unlikely(cfqq == cfqd->active_queue)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 	}
 
 	cfq_put_queue(cfqq);
@@ -2213,7 +2211,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	if (!rq_in_driver(cfqd))
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 }
 
 /*
@@ -2343,7 +2341,7 @@ queue_fail:
 	if (cic)
 		put_io_context(cic->ioc);
 
-	cfq_schedule_dispatch(cfqd, 0);
+	cfq_schedule_dispatch(cfqd);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	cfq_log(cfqd, "set_request fail");
 	return 1;
@@ -2352,7 +2350,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 {
 	struct cfq_data *cfqd =
-		container_of(work, struct cfq_data, unplug_work.work);
+		container_of(work, struct cfq_data, unplug_work);
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
@@ -2406,7 +2404,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
 	cfq_slice_expired(cfqd, timed_out);
 out_kick:
-	cfq_schedule_dispatch(cfqd, 0);
+	cfq_schedule_dispatch(cfqd);
 out_cont:
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2414,7 +2412,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 	del_timer_sync(&cfqd->idle_slice_timer);
-	cancel_delayed_work_sync(&cfqd->unplug_work);
+	cancel_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2496,7 +2494,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-	INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
+	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
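One detail worth noting in the cfq_kick_queue() hunk: a struct
delayed_work embeds its work_struct in a .work member, so dropping the
delay also simplifies the container_of() lookup from 'unplug_work.work'
to 'unplug_work'. A standalone sketch of the resulting pattern (assumes
only the standard workqueue API; the struct is trimmed and the init
helper is invented for illustration):

#include <linux/workqueue.h>

struct cfq_data {
	struct work_struct unplug_work; /* was: struct delayed_work unplug_work; */
	/* ... remaining fields elided ... */
};

static void cfq_kick_queue(struct work_struct *work)
{
	/* Recover the containing cfq_data from the embedded work item.
	 * With the old delayed_work field this had to be
	 * container_of(work, struct cfq_data, unplug_work.work). */
	struct cfq_data *cfqd = container_of(work, struct cfq_data, unplug_work);

	/* ... run the request queue under q->queue_lock, as in the hunk above ... */
	(void)cfqd;
}

/* Hypothetical init helper showing the INIT_WORK() hookup. */
static void cfq_unplug_setup(struct cfq_data *cfqd)
{
	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
}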