 block/cfq-iosched.c | 26 ++++++++++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index fb2141ec205..135b1a48da2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -237,6 +237,7 @@ struct cfq_data {
 	struct rb_root prio_trees[CFQ_PRIO_LISTS];
 
 	unsigned int busy_queues;
+	unsigned int busy_sync_queues;
 
 	int rq_in_driver;
 	int rq_in_flight[2];
@@ -1344,6 +1345,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
+	if (cfq_cfqq_sync(cfqq))
+		cfqd->busy_sync_queues++;
 
 	cfq_resort_rr_list(cfqd, cfqq);
 }
@@ -1370,6 +1373,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		cfq_group_service_tree_del(cfqd, cfqq->cfqg);
 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
+	if (cfq_cfqq_sync(cfqq))
+		cfqd->busy_sync_queues--;
 }
 
 /*
@@ -2377,6 +2382,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * Does this cfqq already have too much IO in flight?
 	 */
 	if (cfqq->dispatched >= max_dispatch) {
+		bool promote_sync = false;
 		/*
 		 * idle queue must always only have a single IO in flight
 		 */
@@ -2384,15 +2390,31 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 			return false;
 
 		/*
+		 * If there is only one sync queue, and its think time is
+		 * small, we can ignore async queue here and give the sync
+		 * queue no dispatch limit. The reason is a sync queue can
+		 * preempt async queue, limiting the sync queue doesn't make
+		 * sense. This is useful for aiostress test.
+		 */
+		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1) {
+			struct cfq_io_context *cic = RQ_CIC(cfqq->next_rq);
+
+			if (sample_valid(cic->ttime_samples) &&
+				cic->ttime_mean < cfqd->cfq_slice_idle)
+				promote_sync = true;
+		}
+
+		/*
 		 * We have other queues, don't allow more IO from this one
 		 */
-		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
+		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
+				!promote_sync)
 			return false;
 
 		/*
 		 * Sole queue user, no limit
 		 */
-		if (cfqd->busy_queues == 1)
+		if (cfqd->busy_queues == 1 || promote_sync)
 			max_dispatch = -1;
 		else
 			/*
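
For readers who want to trace the new dispatch decision outside the kernel tree, below is a minimal userspace sketch of the logic the last two hunks add. The toy_* names, the microsecond fields, and the omission of the cfq_slice_used_soon() check are illustrative stand-ins, not part of the patch; only the promote_sync decision mirrors the change above.

/*
 * Simplified userspace model of the promote_sync decision.
 * Struct fields and thresholds are stand-ins for cfq_data/cfq_queue,
 * not the kernel's actual definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_cfqd {
	unsigned int busy_queues;
	unsigned int busy_sync_queues;
	unsigned int slice_idle_us;	/* stand-in for cfqd->cfq_slice_idle */
};

struct toy_cfqq {
	bool sync;
	unsigned int dispatched;	/* requests already in flight */
	unsigned int ttime_samples;	/* think-time samples seen so far */
	unsigned int ttime_mean_us;	/* mean think time */
};

/* Returns true if the queue may dispatch another request. */
static bool toy_may_dispatch(const struct toy_cfqd *cfqd,
			     const struct toy_cfqq *cfqq,
			     unsigned int max_dispatch)
{
	if (cfqq->dispatched < max_dispatch)
		return true;

	bool promote_sync = false;

	/*
	 * Lone sync queue with a short think time: lift the limit,
	 * mirroring the busy_sync_queues == 1 check in the patch.
	 */
	if (cfqq->sync && cfqd->busy_sync_queues == 1 &&
	    cfqq->ttime_samples > 0 &&
	    cfqq->ttime_mean_us < cfqd->slice_idle_us)
		promote_sync = true;

	/*
	 * Other queues exist and this one is not promoted: stop here.
	 * (The kernel additionally requires cfq_slice_used_soon().)
	 */
	if (cfqd->busy_queues > 1 && !promote_sync)
		return false;

	/* Sole queue user, or promoted sync queue: no limit. */
	return true;
}

int main(void)
{
	struct toy_cfqd cfqd = {
		.busy_queues = 2,	/* one sync + one async queue */
		.busy_sync_queues = 1,
		.slice_idle_us = 8000,
	};
	struct toy_cfqq sync_q = {
		.sync = true,
		.dispatched = 16,	/* already at the dispatch cap */
		.ttime_samples = 32,
		.ttime_mean_us = 500,	/* thinks fast -> gets promoted */
	};

	printf("sync queue may dispatch: %s\n",
	       toy_may_dispatch(&cfqd, &sync_q, 16) ? "yes" : "no");
	return 0;
}

With busy_queues at 2 but busy_sync_queues at 1 and a think time below slice_idle_us, the cap is lifted and the call prints "yes"; flip sync to false or raise ttime_mean_us and the same call returns "no", which is the pre-patch behaviour for a queue that has reached max_dispatch while other queues are busy.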