author		Jens Axboe <jens.axboe@oracle.com>	2009-04-07 02:51:19 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-04-07 02:51:19 -0400
commit		2f5cb7381b737e24c8046fd4aeab571fb71315f5 (patch)
tree		27b82d748bdf99296b8a914c51dad2fbea90aea3 /block
parent		3fbed4c61abd8458896e38633d10110cb5a589d4 (diff)
cfq-iosched: change dispatch logic to deal with single requests at a time
The IO scheduler core calls into the IO scheduler's dispatch_request hook
to move requests from the IO scheduler onto the driver dispatch list, and
it only does so when that list is empty. CFQ moves several requests to
the dispatch list at once, which can cause higher latencies if we
suddenly have to switch to some important sync IO. Change the logic to
move one request at a time instead.
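
For readers without the tree in front of them, the hook referred to above
is CFQ's elevator_dispatch_fn. A trimmed sketch of how it is wired up in
the 2.6.29-era block/cfq-iosched.c (most other ops omitted; see the actual
source for the full table):

/*
 * Abbreviated sketch of CFQ's elevator registration. The block core
 * invokes elevator_dispatch_fn whenever its dispatch list is empty;
 * the hook returns nonzero if it moved at least one request.
 */
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_dispatch_fn	= cfq_dispatch_requests,
		/* merge, add_req, completed_req, ... omitted here */
	},
	.elevator_name	= "cfq",
	.elevator_owner	= THIS_MODULE,
};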
This should be almost functionally equivalent to what we did before,
except that we now honor 'quantum' as the maximum queue depth on the
device side from any single cfqq. If there's just a single active
cfqq, we allow up to 4 times the normal quantum.
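
To make the numbers concrete (a sketch, assuming the default cfq_quantum
of 4): a cfqq competing with other busy queues is capped at 4 requests in
flight, while a lone cfqq may ramp up to 16. The in-flight cap added below
can be modeled by this standalone helper; may_dispatch() is a hypothetical
name for illustration, not the actual kernel function:

#include <stdbool.h>

/*
 * Hypothetical model of the new per-cfqq dispatch cap; it mirrors the
 * in-flight checks added to cfq_dispatch_requests() in the diff below.
 */
static bool may_dispatch(unsigned int in_flight, unsigned int busy_queues,
			 unsigned int quantum, bool idle_class)
{
	unsigned int max_dispatch = idle_class ? 1 : quantum;

	if (in_flight < max_dispatch)
		return true;
	/* idle queues never get more than a single request in flight */
	if (idle_class)
		return false;
	/* other queues are waiting; stop feeding this one */
	if (busy_queues > 1)
		return false;
	/* sole busy queue: allow up to 4 times the normal quantum */
	return in_flight < 4 * max_dispatch;
}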
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	166
1 file changed, 90 insertions(+), 76 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9e809345f71a..a0102a507dae 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -160,6 +160,7 @@ struct cfq_queue {
 
 	unsigned long slice_end;
 	long slice_resid;
+	unsigned int slice_dispatch;
 
 	/* pending metadata requests */
 	int meta_pending;
@@ -774,10 +775,16 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active");
 		cfqq->slice_end = 0;
+		cfqq->slice_dispatch = 0;
+
+		cfq_clear_cfqq_must_dispatch(cfqq);
+		cfq_clear_cfqq_wait_request(cfqq);
 		cfq_clear_cfqq_must_alloc_slice(cfqq);
 		cfq_clear_cfqq_fifo_expire(cfqq);
 		cfq_mark_cfqq_slice_new(cfqq);
 		cfq_clear_cfqq_queue_new(cfqq);
+
+		del_timer(&cfqd->idle_slice_timer);
 	}
 
 	cfqd->active_queue = cfqq;
@@ -1053,66 +1060,6 @@ keep_queue:
 	return cfqq;
 }
 
-/*
- * Dispatch some requests from cfqq, moving them to the request queue
- * dispatch list.
- */
-static int
-__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			int max_dispatch)
-{
-	int dispatched = 0;
-
-	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-	do {
-		struct request *rq;
-
-		/*
-		 * follow expired path, else get first next available
-		 */
-		rq = cfq_check_fifo(cfqq);
-		if (rq == NULL)
-			rq = cfqq->next_rq;
-
-		/*
-		 * finally, insert request into driver dispatch list
-		 */
-		cfq_dispatch_insert(cfqd->queue, rq);
-
-		dispatched++;
-
-		if (!cfqd->active_cic) {
-			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
-			cfqd->active_cic = RQ_CIC(rq);
-		}
-
-		if (RB_EMPTY_ROOT(&cfqq->sort_list))
-			break;
-
-		/*
-		 * If there is a non-empty RT cfqq waiting for current
-		 * cfqq's timeslice to complete, pre-empt this cfqq
-		 */
-		if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues)
-			break;
-
-	} while (dispatched < max_dispatch);
-
-	/*
-	 * expire an async queue immediately if it has used up its slice. idle
-	 * queue always expire after 1 dispatch round.
-	 */
-	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
-	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-	    cfq_class_idle(cfqq))) {
-		cfqq->slice_end = jiffies + 1;
-		cfq_slice_expired(cfqd, 0);
-	}
-
-	return dispatched;
-}
-
 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 {
 	int dispatched = 0;
@@ -1146,11 +1093,45 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 }
 
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct request *rq;
+
+	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+	/*
+	 * follow expired path, else get first next available
+	 */
+	rq = cfq_check_fifo(cfqq);
+	if (!rq)
+		rq = cfqq->next_rq;
+
+	/*
+	 * insert request into driver dispatch list
+	 */
+	cfq_dispatch_insert(cfqd->queue, rq);
+
+	if (!cfqd->active_cic) {
+		struct cfq_io_context *cic = RQ_CIC(rq);
+
+		atomic_inc(&cic->ioc->refcount);
+		cfqd->active_cic = cic;
+	}
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
 static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq;
-	int dispatched;
+	unsigned int max_dispatch;
 
 	if (!cfqd->busy_queues)
 		return 0;
@@ -1158,29 +1139,62 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 	if (unlikely(force))
 		return cfq_forced_dispatch(cfqd);
 
-	dispatched = 0;
-	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
-		int max_dispatch;
+	cfqq = cfq_select_queue(cfqd);
+	if (!cfqq)
+		return 0;
+
+	/*
+	 * If this is an async queue and we have sync IO in flight, let it wait
+	 */
+	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+		return 0;
 
-		max_dispatch = cfqd->cfq_quantum;
+	max_dispatch = cfqd->cfq_quantum;
+	if (cfq_class_idle(cfqq))
+		max_dispatch = 1;
+
+	/*
+	 * Does this cfqq already have too much IO in flight?
+	 */
+	if (cfqq->dispatched >= max_dispatch) {
+		/*
+		 * idle queue must always only have a single IO in flight
+		 */
 		if (cfq_class_idle(cfqq))
-			max_dispatch = 1;
+			return 0;
 
-		if (cfqq->dispatched >= max_dispatch && cfqd->busy_queues > 1)
-			break;
+		/*
+		 * We have other queues, don't allow more IO from this one
+		 */
+		if (cfqd->busy_queues > 1)
+			return 0;
 
-		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-			break;
+		/*
+		 * we are the only queue, allow up to 4 times of 'quantum'
+		 */
+		if (cfqq->dispatched >= 4 * max_dispatch)
+			return 0;
+	}
 
-		cfq_clear_cfqq_must_dispatch(cfqq);
-		cfq_clear_cfqq_wait_request(cfqq);
-		del_timer(&cfqd->idle_slice_timer);
+	/*
+	 * Dispatch a request from this cfqq
+	 */
+	cfq_dispatch_request(cfqd, cfqq);
+	cfqq->slice_dispatch++;
 
-		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+	/*
+	 * expire an async queue immediately if it has used up its slice. idle
+	 * queue always expire after 1 dispatch round.
+	 */
+	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+	    cfq_class_idle(cfqq))) {
+		cfqq->slice_end = jiffies + 1;
+		cfq_slice_expired(cfqd, 0);
 	}
 
-	cfq_log(cfqd, "dispatched=%d", dispatched);
-	return dispatched;
+	cfq_log(cfqd, "dispatched a request");
+	return 1;
 }
 
 /*