Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  63
1 file changed, 46 insertions, 17 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1ca813b16e78..9c4b679908f4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -150,7 +150,7 @@ struct cfq_data {
 	 * idle window management
 	 */
 	struct timer_list idle_slice_timer;
-	struct work_struct unplug_work;
+	struct delayed_work unplug_work;
 
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
@@ -173,6 +173,7 @@ struct cfq_data {
 	unsigned int cfq_slice[2];
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
+	unsigned int cfq_latency;
 
 	struct list_head cic_list;
 
@@ -180,6 +181,8 @@ struct cfq_data {
 	 * Fallback dummy cfqq for extreme OOM conditions
 	 */
 	struct cfq_queue oom_cfqq;
+
+	unsigned long last_end_sync_rq;
 };
 
 enum cfqq_state_flags {
@@ -265,11 +268,13 @@ static inline int cfq_bio_sync(struct bio *bio)
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
+					 unsigned long delay)
 {
 	if (cfqd->busy_queues) {
 		cfq_log(cfqd, "schedule dispatch");
-		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
+		kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
+						delay);
 	}
 }
 
@@ -1326,12 +1331,30 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 			return 0;
 
 		/*
-		 * we are the only queue, allow up to 4 times of 'quantum'
+		 * Sole queue user, allow bigger slice
 		 */
-		if (cfqq->dispatched >= 4 * max_dispatch)
-			return 0;
+		max_dispatch *= 4;
+	}
+
+	/*
+	 * Async queues must wait a bit before being allowed dispatch.
+	 * We also ramp up the dispatch depth gradually for async IO,
+	 * based on the last sync IO we serviced
+	 */
+	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+		unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+		unsigned int depth;
+
+		depth = last_sync / cfqd->cfq_slice[1];
+		if (!depth && !cfqq->dispatched)
+			depth = 1;
+		if (depth < max_dispatch)
+			max_dispatch = depth;
 	}
 
+	if (cfqq->dispatched >= max_dispatch)
+		return 0;
+
 	/*
 	 * Dispatch a request from this cfqq
 	 */
@@ -1376,7 +1399,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
 	if (unlikely(cfqd->active_queue == cfqq)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 	}
 
 	kmem_cache_free(cfq_pool, cfqq);
@@ -1471,7 +1494,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	if (unlikely(cfqq == cfqd->active_queue)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 	}
 
 	cfq_put_queue(cfqq);
@@ -1951,7 +1974,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
 	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-	    (cfqd->hw_tag && CIC_SEEKY(cic)))
+	    (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 		if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -2157,8 +2180,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	if (cfq_cfqq_sync(cfqq))
 		cfqd->sync_flight--;
 
-	if (sync)
+	if (sync) {
 		RQ_CIC(rq)->last_end_request = now;
+		cfqd->last_end_sync_rq = now;
+	}
 
 	/*
 	 * If this is the active queue, check if it needs to be expired,
@@ -2186,7 +2211,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	if (!rq_in_driver(cfqd))
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 }
 
 /*
@@ -2316,7 +2341,7 @@ queue_fail:
 	if (cic)
 		put_io_context(cic->ioc);
 
-	cfq_schedule_dispatch(cfqd);
+	cfq_schedule_dispatch(cfqd, 0);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	cfq_log(cfqd, "set_request fail");
 	return 1;
@@ -2325,7 +2350,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 {
 	struct cfq_data *cfqd =
-		container_of(work, struct cfq_data, unplug_work);
+		container_of(work, struct cfq_data, unplug_work.work);
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
@@ -2379,7 +2404,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
 	cfq_slice_expired(cfqd, timed_out);
 out_kick:
-	cfq_schedule_dispatch(cfqd);
+	cfq_schedule_dispatch(cfqd, 0);
 out_cont:
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2387,7 +2412,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 	del_timer_sync(&cfqd->idle_slice_timer);
-	cancel_work_sync(&cfqd->unplug_work);
+	cancel_delayed_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2469,7 +2494,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
+	INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -2480,8 +2505,9 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice[1] = cfq_slice_sync;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
+	cfqd->cfq_latency = 1;
 	cfqd->hw_tag = 1;
-
+	cfqd->last_end_sync_rq = jiffies;
 	return cfqd;
 }
 
@@ -2549,6 +2575,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
@@ -2580,6 +2607,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
 		UINT_MAX, 0);
+STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -2595,6 +2623,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_async),
 	CFQ_ATTR(slice_async_rq),
 	CFQ_ATTR(slice_idle),
+	CFQ_ATTR(low_latency),
 	__ATTR_NULL
 };
 
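The ramp-up added to cfq_dispatch_requests() above comes down to simple jiffies arithmetic: the allowed async dispatch depth is the time elapsed since the last sync completion (last_end_sync_rq) divided by the sync slice length (cfq_slice[1]), with a floor of one in-flight request so async IO never stalls completely. The stand-alone sketch below illustrates just that calculation; the async_depth() helper and the numbers in main() are invented for this example (assuming HZ=1000, where the default 100 ms sync slice is 100 jiffies) and are not part of the patch.

/*
 * Illustrative userspace sketch of the async dispatch ramp-up.
 * Assumes HZ=1000 and a 100-jiffy sync slice; values are examples only.
 */
#include <stdio.h>

static unsigned int async_depth(unsigned long jiffies_now,
				unsigned long last_end_sync_rq,
				unsigned int slice_sync,
				unsigned int max_dispatch,
				unsigned int dispatched)
{
	/* time since the last sync request completed, in jiffies */
	unsigned long last_sync = jiffies_now - last_end_sync_rq;
	/* one extra unit of depth per full sync slice that has elapsed */
	unsigned int depth = last_sync / slice_sync;

	/* always let a single async request trickle out */
	if (!depth && !dispatched)
		depth = 1;
	/* never exceed what the quantum logic already allowed */
	if (depth < max_dispatch)
		max_dispatch = depth;
	return max_dispatch;
}

int main(void)
{
	/* 40 jiffies after a sync completion: depth 0, so allow just 1 */
	printf("%u\n", async_depth(1040, 1000, 100, 4, 0));	/* -> 1 */
	/* 250 jiffies later: two full slices have passed, depth 2 */
	printf("%u\n", async_depth(1250, 1000, 100, 4, 1));	/* -> 2 */
	/* long since any sync IO: the original max_dispatch caps it */
	printf("%u\n", async_depth(2000, 1000, 100, 4, 3));	/* -> 4 */
	return 0;
}

Because cfq_init_queue() sets cfq_latency = 1, this throttling is on by default. Writing 0 to the new low_latency tunable (wired up by the SHOW_FUNCTION/STORE_FUNCTION/CFQ_ATTR hunks and normally visible under /sys/block/<device>/queue/iosched/) restores the previous behaviour: async dispatch is no longer throttled, and, as the cfq_update_idle_window() hunk shows, idling is again disabled for seeky tasks on queueing (hw_tag) hardware.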