Diffstat (limited to 'block')
-rw-r--r--   block/cfq-iosched.c   35
1 file changed, 27 insertions, 8 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 89dc745c7d94..9697053f80bc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -237,6 +237,7 @@ struct cfq_data {
 	struct rb_root prio_trees[CFQ_PRIO_LISTS];
 
 	unsigned int busy_queues;
+	unsigned int busy_sync_queues;
 
 	int rq_in_driver;
 	int rq_in_flight[2];
@@ -556,15 +557,13 @@ static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
 
 static void update_min_vdisktime(struct cfq_rb_root *st)
 {
-	u64 vdisktime = st->min_vdisktime;
 	struct cfq_group *cfqg;
 
 	if (st->left) {
 		cfqg = rb_entry_cfqg(st->left);
-		vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
+		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
+						  cfqg->vdisktime);
 	}
-
-	st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
 }
 
 /*
@@ -1344,6 +1343,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
+	if (cfq_cfqq_sync(cfqq))
+		cfqd->busy_sync_queues++;
 
 	cfq_resort_rr_list(cfqd, cfqq);
 }
@@ -1370,6 +1371,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		cfq_group_service_tree_del(cfqd, cfqq->cfqg);
 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
+	if (cfq_cfqq_sync(cfqq))
+		cfqd->busy_sync_queues--;
 }
 
 /*
@@ -2377,6 +2380,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * Does this cfqq already have too much IO in flight?
 	 */
 	if (cfqq->dispatched >= max_dispatch) {
+		bool promote_sync = false;
 		/*
 		 * idle queue must always only have a single IO in flight
 		 */
@@ -2384,15 +2388,31 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 			return false;
 
 		/*
+		 * If there is only one sync queue, and its think time is
+		 * small, we can ignore async queue here and give the sync
+		 * queue no dispatch limit. The reason is a sync queue can
+		 * preempt async queue, limiting the sync queue doesn't make
+		 * sense. This is useful for aiostress test.
+		 */
+		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1) {
+			struct cfq_io_context *cic = RQ_CIC(cfqq->next_rq);
+
+			if (sample_valid(cic->ttime_samples) &&
+				cic->ttime_mean < cfqd->cfq_slice_idle)
+				promote_sync = true;
+		}
+
+		/*
 		 * We have other queues, don't allow more IO from this one
 		 */
-		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
+		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
+				!promote_sync)
 			return false;
 
 		/*
 		 * Sole queue user, no limit
 		 */
-		if (cfqd->busy_queues == 1)
+		if (cfqd->busy_queues == 1 || promote_sync)
 			max_dispatch = -1;
 		else
 			/*
@@ -3675,12 +3695,11 @@ new_queue:
 
 	cfqq->allocated[rw]++;
 
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	cfqq->ref++;
 	rq->elevator_private[0] = cic;
 	rq->elevator_private[1] = cfqq;
 	rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 	return 0;
 
 queue_fail:
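
For reference, the dispatch-limit exemption this patch adds can be read in isolation. The following user-space sketch mirrors the new promote_sync decision from cfq_may_dispatch(); the stripped-down structs, the sample_valid() threshold, and the main() harness are assumptions made for illustration, not the kernel's definitions.

/*
 * Illustrative sketch of the "promote_sync" check: a lone busy sync queue
 * with a short think time escapes the per-queue dispatch limit, since the
 * async queues it would otherwise be throttled for can be preempted anyway.
 * Simplified types and thresholds are assumptions for this sketch only.
 */
#include <stdbool.h>
#include <stdio.h>

struct cic_stats {                 /* stand-in for struct cfq_io_context */
	int ttime_samples;         /* think-time samples collected so far */
	unsigned long ttime_mean;  /* mean think time (jiffies) */
};

struct cfqd_stats {                /* stand-in for the cfq_data fields used */
	unsigned int busy_queues;
	unsigned int busy_sync_queues;
	unsigned long cfq_slice_idle;
};

/* treat the think-time average as meaningful once enough samples exist */
static bool sample_valid(int samples)
{
	return samples > 80;       /* assumed threshold for this sketch */
}

/* true when the only busy sync queue should get no dispatch limit */
static bool promote_sync(const struct cfqd_stats *cfqd,
			 const struct cic_stats *cic, bool queue_is_sync)
{
	if (!queue_is_sync || cfqd->busy_sync_queues != 1)
		return false;

	return sample_valid(cic->ttime_samples) &&
	       cic->ttime_mean < cfqd->cfq_slice_idle;
}

int main(void)
{
	struct cfqd_stats cfqd = {
		.busy_queues = 3, .busy_sync_queues = 1, .cfq_slice_idle = 8,
	};
	struct cic_stats cic = { .ttime_samples = 100, .ttime_mean = 2 };

	/* when this prints 1, max_dispatch is lifted (set to -1) for the queue */
	printf("promote_sync = %d\n", promote_sync(&cfqd, &cic, true));
	return 0;
}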