diff options
author | Vivek Goyal <vgoyal@redhat.com> | 2009-12-03 12:59:40 -0500 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-12-03 13:28:51 -0500 |
commit | f04a64246344ad50e4b4b4186174a0912d07f30b (patch) | |
tree | 03d2d39d2811350af39dd635297b90ca52b9b6fe /block/cfq-iosched.c | |
parent | 615f0259e6940293359a189f4881bb28c2fea40b (diff) |
blkio: Keep queue on service tree until we expire it
o Currently CFQ deletes a queue from the service tree as soon as it becomes
empty (even if we might still idle on it). This patch keeps the queue on the
service tree — and hence keeps its associated group on the service tree —
until we decide that we are not going to idle on the queue and expire it.
o This just helps with time accounting for the queue/group and with the
implementation of the rest of the patches.
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r-- | block/cfq-iosched.c | 70 |
1 file changed, 49 insertions, 21 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index fab2be0fa215..7f5646ac9f5d 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -393,7 +393,7 @@ static int cfq_queue_empty(struct request_queue *q) | |||
393 | { | 393 | { |
394 | struct cfq_data *cfqd = q->elevator->elevator_data; | 394 | struct cfq_data *cfqd = q->elevator->elevator_data; |
395 | 395 | ||
396 | return !cfqd->busy_queues; | 396 | return !cfqd->rq_queued; |
397 | } | 397 | } |
398 | 398 | ||
399 | /* | 399 | /* |
@@ -842,7 +842,6 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
842 | static void cfq_del_rq_rb(struct request *rq) | 842 | static void cfq_del_rq_rb(struct request *rq) |
843 | { | 843 | { |
844 | struct cfq_queue *cfqq = RQ_CFQQ(rq); | 844 | struct cfq_queue *cfqq = RQ_CFQQ(rq); |
845 | struct cfq_data *cfqd = cfqq->cfqd; | ||
846 | const int sync = rq_is_sync(rq); | 845 | const int sync = rq_is_sync(rq); |
847 | 846 | ||
848 | BUG_ON(!cfqq->queued[sync]); | 847 | BUG_ON(!cfqq->queued[sync]); |
@@ -850,8 +849,17 @@ static void cfq_del_rq_rb(struct request *rq) | |||
850 | 849 | ||
851 | elv_rb_del(&cfqq->sort_list, rq); | 850 | elv_rb_del(&cfqq->sort_list, rq); |
852 | 851 | ||
853 | if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) | 852 | if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) { |
854 | cfq_del_cfqq_rr(cfqd, cfqq); | 853 | /* |
854 | * Queue will be deleted from service tree when we actually | ||
855 | * expire it later. Right now just remove it from prio tree | ||
856 | * as it is empty. | ||
857 | */ | ||
858 | if (cfqq->p_root) { | ||
859 | rb_erase(&cfqq->p_node, cfqq->p_root); | ||
860 | cfqq->p_root = NULL; | ||
861 | } | ||
862 | } | ||
855 | } | 863 | } |
856 | 864 | ||
857 | static void cfq_add_rq_rb(struct request *rq) | 865 | static void cfq_add_rq_rb(struct request *rq) |
@@ -1065,6 +1073,9 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1065 | cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); | 1073 | cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); |
1066 | } | 1074 | } |
1067 | 1075 | ||
1076 | if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) | ||
1077 | cfq_del_cfqq_rr(cfqd, cfqq); | ||
1078 | |||
1068 | cfq_resort_rr_list(cfqd, cfqq); | 1079 | cfq_resort_rr_list(cfqd, cfqq); |
1069 | 1080 | ||
1070 | if (cfqq == cfqd->active_queue) | 1081 | if (cfqq == cfqd->active_queue) |
@@ -1094,11 +1105,30 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) | |||
1094 | service_tree_for(cfqd->serving_group, cfqd->serving_prio, | 1105 | service_tree_for(cfqd->serving_group, cfqd->serving_prio, |
1095 | cfqd->serving_type, cfqd); | 1106 | cfqd->serving_type, cfqd); |
1096 | 1107 | ||
1108 | if (!cfqd->rq_queued) | ||
1109 | return NULL; | ||
1110 | |||
1097 | if (RB_EMPTY_ROOT(&service_tree->rb)) | 1111 | if (RB_EMPTY_ROOT(&service_tree->rb)) |
1098 | return NULL; | 1112 | return NULL; |
1099 | return cfq_rb_first(service_tree); | 1113 | return cfq_rb_first(service_tree); |
1100 | } | 1114 | } |
1101 | 1115 | ||
1116 | static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd) | ||
1117 | { | ||
1118 | struct cfq_group *cfqg = &cfqd->root_group; | ||
1119 | struct cfq_queue *cfqq; | ||
1120 | int i, j; | ||
1121 | struct cfq_rb_root *st; | ||
1122 | |||
1123 | if (!cfqd->rq_queued) | ||
1124 | return NULL; | ||
1125 | |||
1126 | for_each_cfqg_st(cfqg, i, j, st) | ||
1127 | if ((cfqq = cfq_rb_first(st)) != NULL) | ||
1128 | return cfqq; | ||
1129 | return NULL; | ||
1130 | } | ||
1131 | |||
1102 | /* | 1132 | /* |
1103 | * Get and set a new active queue for service. | 1133 | * Get and set a new active queue for service. |
1104 | */ | 1134 | */ |
@@ -1231,6 +1261,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
1231 | enum wl_prio_t prio = cfqq_prio(cfqq); | 1261 | enum wl_prio_t prio = cfqq_prio(cfqq); |
1232 | struct cfq_rb_root *service_tree = cfqq->service_tree; | 1262 | struct cfq_rb_root *service_tree = cfqq->service_tree; |
1233 | 1263 | ||
1264 | BUG_ON(!service_tree); | ||
1265 | BUG_ON(!service_tree->count); | ||
1266 | |||
1234 | /* We never do for idle class queues. */ | 1267 | /* We never do for idle class queues. */ |
1235 | if (prio == IDLE_WORKLOAD) | 1268 | if (prio == IDLE_WORKLOAD) |
1236 | return false; | 1269 | return false; |
@@ -1243,14 +1276,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
1243 | * Otherwise, we do only if they are the last ones | 1276 | * Otherwise, we do only if they are the last ones |
1244 | * in their service tree. | 1277 | * in their service tree. |
1245 | */ | 1278 | */ |
1246 | if (!service_tree) | 1279 | return service_tree->count == 1; |
1247 | service_tree = service_tree_for(cfqq->cfqg, prio, | ||
1248 | cfqq_type(cfqq), cfqd); | ||
1249 | |||
1250 | if (service_tree->count == 0) | ||
1251 | return true; | ||
1252 | |||
1253 | return (service_tree->count == 1 && cfq_rb_first(service_tree) == cfqq); | ||
1254 | } | 1280 | } |
1255 | 1281 | ||
1256 | static void cfq_arm_slice_timer(struct cfq_data *cfqd) | 1282 | static void cfq_arm_slice_timer(struct cfq_data *cfqd) |
@@ -1527,6 +1553,8 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | |||
1527 | if (!cfqq) | 1553 | if (!cfqq) |
1528 | goto new_queue; | 1554 | goto new_queue; |
1529 | 1555 | ||
1556 | if (!cfqd->rq_queued) | ||
1557 | return NULL; | ||
1530 | /* | 1558 | /* |
1531 | * The active queue has run out of time, expire it and select new. | 1559 | * The active queue has run out of time, expire it and select new. |
1532 | */ | 1560 | */ |
@@ -1589,6 +1617,9 @@ static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) | |||
1589 | } | 1617 | } |
1590 | 1618 | ||
1591 | BUG_ON(!list_empty(&cfqq->fifo)); | 1619 | BUG_ON(!list_empty(&cfqq->fifo)); |
1620 | |||
1621 | /* By default cfqq is not expired if it is empty. Do it explicitly */ | ||
1622 | __cfq_slice_expired(cfqq->cfqd, cfqq, 0); | ||
1592 | return dispatched; | 1623 | return dispatched; |
1593 | } | 1624 | } |
1594 | 1625 | ||
@@ -1600,14 +1631,9 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd) | |||
1600 | { | 1631 | { |
1601 | struct cfq_queue *cfqq; | 1632 | struct cfq_queue *cfqq; |
1602 | int dispatched = 0; | 1633 | int dispatched = 0; |
1603 | int i, j; | ||
1604 | struct cfq_group *cfqg = &cfqd->root_group; | ||
1605 | struct cfq_rb_root *st; | ||
1606 | 1634 | ||
1607 | for_each_cfqg_st(cfqg, i, j, st) { | 1635 | while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) |
1608 | while ((cfqq = cfq_rb_first(st)) != NULL) | 1636 | dispatched += __cfq_forced_dispatch_cfqq(cfqq); |
1609 | dispatched += __cfq_forced_dispatch_cfqq(cfqq); | ||
1610 | } | ||
1611 | 1637 | ||
1612 | cfq_slice_expired(cfqd, 0); | 1638 | cfq_slice_expired(cfqd, 0); |
1613 | BUG_ON(cfqd->busy_queues); | 1639 | BUG_ON(cfqd->busy_queues); |
@@ -1776,13 +1802,13 @@ static void cfq_put_queue(struct cfq_queue *cfqq) | |||
1776 | cfq_log_cfqq(cfqd, cfqq, "put_queue"); | 1802 | cfq_log_cfqq(cfqd, cfqq, "put_queue"); |
1777 | BUG_ON(rb_first(&cfqq->sort_list)); | 1803 | BUG_ON(rb_first(&cfqq->sort_list)); |
1778 | BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); | 1804 | BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); |
1779 | BUG_ON(cfq_cfqq_on_rr(cfqq)); | ||
1780 | 1805 | ||
1781 | if (unlikely(cfqd->active_queue == cfqq)) { | 1806 | if (unlikely(cfqd->active_queue == cfqq)) { |
1782 | __cfq_slice_expired(cfqd, cfqq, 0); | 1807 | __cfq_slice_expired(cfqd, cfqq, 0); |
1783 | cfq_schedule_dispatch(cfqd); | 1808 | cfq_schedule_dispatch(cfqd); |
1784 | } | 1809 | } |
1785 | 1810 | ||
1811 | BUG_ON(cfq_cfqq_on_rr(cfqq)); | ||
1786 | kmem_cache_free(cfq_pool, cfqq); | 1812 | kmem_cache_free(cfq_pool, cfqq); |
1787 | } | 1813 | } |
1788 | 1814 | ||
@@ -2444,9 +2470,11 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, | |||
2444 | if (cfq_class_idle(cfqq)) | 2470 | if (cfq_class_idle(cfqq)) |
2445 | return true; | 2471 | return true; |
2446 | 2472 | ||
2473 | /* Allow preemption only if we are idling on sync-noidle tree */ | ||
2447 | if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD && | 2474 | if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD && |
2448 | cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD && | 2475 | cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD && |
2449 | new_cfqq->service_tree->count == 1) | 2476 | new_cfqq->service_tree->count == 2 && |
2477 | RB_EMPTY_ROOT(&cfqq->sort_list)) | ||
2450 | return true; | 2478 | return true; |
2451 | 2479 | ||
2452 | /* | 2480 | /* |