 block/blk-core.c       |  8 ++++++++
 block/cfq-iosched.c    | 56 ++++++++++++++++++++++++++++++++++++--------------------
 include/linux/blkdev.h |  4 ++++
 3 files changed, 48 insertions(+), 20 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index ddaaea4fdffc..a8c7fbe52e24 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2492,6 +2492,14 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
+int kblockd_schedule_delayed_work(struct request_queue *q,
+				  struct delayed_work *work,
+				  unsigned long delay)
+{
+	return queue_delayed_work(kblockd_workqueue, work, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work);
+
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
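The new export is a thin wrapper around queue_delayed_work() on the shared kblockd workqueue; the delay argument is in jiffies. In workqueue code of this vintage a zero delay falls through to an immediate queue_work(), which is why the converted call sites below can simply pass 0 to keep their old behavior. A minimal caller sketch, assuming a hypothetical driver structure with an already-initialized delayed_work member:

#include <linux/blkdev.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Hypothetical driver state; only the delayed_work member matters here. */
struct demo_data {
	struct request_queue *queue;
	struct delayed_work rerun_work;	/* assumed set up with INIT_DELAYED_WORK() */
};

/* Ask kblockd to kick the queue roughly 5 ms from now; passing 0 instead
 * would behave like the old kblockd_schedule_work(). */
static void demo_kick_later(struct demo_data *d)
{
	kblockd_schedule_delayed_work(d->queue, &d->rerun_work,
				      msecs_to_jiffies(5));
}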
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 70b48ea0e3e9..fce8a749f4be 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -150,7 +150,7 @@ struct cfq_data {
 	 * idle window management
 	 */
 	struct timer_list idle_slice_timer;
-	struct work_struct unplug_work;
+	struct delayed_work unplug_work;
 
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
@@ -268,11 +268,13 @@ static inline int cfq_bio_sync(struct bio *bio)
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
+					 unsigned long delay)
 {
 	if (cfqd->busy_queues) {
 		cfq_log(cfqd, "schedule dispatch");
-		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
+		kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
+						delay);
 	}
 }
 
@@ -1316,8 +1318,6 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 	 * Does this cfqq already have too much IO in flight?
 	 */
 	if (cfqq->dispatched >= max_dispatch) {
-		unsigned long load_at = cfqd->last_end_sync_rq + cfq_slice_sync;
-
 		/*
 		 * idle queue must always only have a single IO in flight
 		 */
@@ -1331,20 +1331,36 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 			return 0;
 
 		/*
-		 * If a sync request has completed recently, don't overload
-		 * the dispatch queue yet with async requests.
+		 * Sole queue user, allow bigger slice
 		 */
-		if (cfqd->cfq_desktop && !cfq_cfqq_sync(cfqq)
-		    && time_before(jiffies, load_at))
-			return 0;
+		max_dispatch *= 4;
+	}
+
+	/*
+	 * Async queues must wait a bit before being allowed dispatch.
+	 * We also ramp up the dispatch depth gradually for async IO,
+	 * based on the last sync IO we serviced
+	 */
+	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_desktop) {
+		unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+		unsigned int depth;
 
 		/*
-		 * we are the only queue, allow up to 4 times of 'quantum'
+		 * must wait a bit longer
 		 */
-		if (cfqq->dispatched >= 4 * max_dispatch)
+		if (last_sync < cfq_slice_sync) {
+			cfq_schedule_dispatch(cfqd, cfq_slice_sync - last_sync);
 			return 0;
+		}
+
+		depth = last_sync / cfq_slice_sync;
+		if (depth < max_dispatch)
+			max_dispatch = depth;
 	}
 
+	if (cfqq->dispatched >= max_dispatch)
+		return 0;
+
 	/*
 	 * Dispatch a request from this cfqq
 	 */
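The rewritten branch splits the old overload check in two. A lone busy queue may now run up to four times the normal quantum, while in desktop mode an async queue is throttled by the time since the last sync completion: the allowed depth is last_sync / cfq_slice_sync, so with the default cfq_slice_sync of HZ/10 (100 ms) an async queue dispatches nothing for the first 100 ms after a sync request completes, then one request in flight, two after 200 ms, and so on until max_dispatch caps it. A standalone restatement of that arithmetic (illustrative C, not the kernel function itself):

/*
 * Illustrative sketch of the async ramp-up above: how many requests an
 * async queue may have in flight, given the jiffies elapsed since the
 * last sync completion.
 */
static unsigned int async_depth(unsigned long last_sync,
				unsigned long slice_sync,
				unsigned int max_dispatch)
{
	unsigned long depth;

	if (last_sync < slice_sync)
		return 0;	/* too soon; cfq reschedules the dispatch instead */

	depth = last_sync / slice_sync;
	return depth < max_dispatch ? depth : max_dispatch;
}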
@@ -1389,7 +1405,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
 	if (unlikely(cfqd->active_queue == cfqq)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 	}
 
 	kmem_cache_free(cfq_pool, cfqq);
@@ -1484,7 +1500,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	if (unlikely(cfqq == cfqd->active_queue)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 	}
 
 	cfq_put_queue(cfqq);
@@ -2201,7 +2217,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	if (!rq_in_driver(cfqd))
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 }
 
 /*
@@ -2331,7 +2347,7 @@ queue_fail:
 	if (cic)
 		put_io_context(cic->ioc);
 
-	cfq_schedule_dispatch(cfqd);
+	cfq_schedule_dispatch(cfqd, 0);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	cfq_log(cfqd, "set_request fail");
 	return 1;
@@ -2340,7 +2356,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 {
 	struct cfq_data *cfqd =
-		container_of(work, struct cfq_data, unplug_work);
+		container_of(work, struct cfq_data, unplug_work.work);
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
@@ -2394,7 +2410,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
 	cfq_slice_expired(cfqd, timed_out);
 out_kick:
-	cfq_schedule_dispatch(cfqd);
+	cfq_schedule_dispatch(cfqd, 0);
 out_cont:
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2402,7 +2418,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 	del_timer_sync(&cfqd->idle_slice_timer);
-	cancel_work_sync(&cfqd->unplug_work);
+	cancel_delayed_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2484,7 +2500,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
+	INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
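The remaining cfq hunks are the mechanical half of the work_struct-to-delayed_work conversion: INIT_WORK() becomes INIT_DELAYED_WORK(), cancel_work_sync() becomes cancel_delayed_work_sync(), and the handler's container_of() must now reach through the embedded member, because at the time of this change the type is essentially:

/* Shape of struct delayed_work in <linux/workqueue.h> of this era: a
 * work_struct plus the timer that fires it. The embedded member is why
 * cfq_kick_queue() resolves cfqd via
 * container_of(work, struct cfq_data, unplug_work.work). */
struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};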
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1a03b715dfad..a7323930d2ba 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1147,7 +1147,11 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
+struct delayed_work;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q,
+				  struct delayed_work *work,
+				  unsigned long delay);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
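blkdev.h gets by with a bare forward declaration of struct delayed_work because the prototype only passes a pointer to the incomplete type; any code that actually defines or initializes one still needs the full definition from <linux/workqueue.h>. A hypothetical user, complementing the earlier sketch:

#include <linux/blkdev.h>	/* prototype sees only the forward declaration */
#include <linux/workqueue.h>	/* full type, needed to instantiate one */

static void demo_fn(struct work_struct *work)
{
	/* runs on the kblockd workqueue once the delay expires */
}

static struct delayed_work demo_work;	/* defining this needs the full type */

static void demo_init(struct request_queue *q)
{
	INIT_DELAYED_WORK(&demo_work, demo_fn);
	kblockd_schedule_delayed_work(q, &demo_work, HZ / 10);	/* ~100 ms */
}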