author     Jeff Moyer <jmoyer@redhat.com>        2009-10-23 17:14:50 -0400
committer  Jens Axboe <jens.axboe@oracle.com>    2009-10-26 09:34:47 -0400
commit     df5fe3e8e13883f58dc97489076bbcc150789a21 (patch)
tree       b05ce6760cd2b52e42dca22e3659a0255f4a4e4a /block/cfq-iosched.c
parent     b2c18e1e08a5a9663094d57bb4be2f02226ee61c (diff)
cfq: merge cooperating cfq_queues
Currently, when cooperating cfq_queues are detected, they are allowed to
skip ahead in the scheduling order. It is much more efficient to
automatically share the cfq_queue data structure between cooperating processes.
Performance of the read-test2 benchmark (which is written to emulate the
dump(8) utility) went from 12MB/s to 90MB/s on my SATA disk. NFS servers
with multiple nfsd threads also saw performance increases.
Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
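
The mechanism lands in three places in the diff below: cfq_setup_merge()
records the intended merge by pointing cfqq->new_cfqq at the cooperating
queue and taking one reference per process holding the old queue,
cfq_set_request() performs the actual switch-over in process context via
cfq_merge_cfqqs(), and cfq_exit_cfqq() unwinds any still-pending merge
chain when a queue's process goes away.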
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	89
1 file changed, 87 insertions(+), 2 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 78cc8ee5da41..f0994aedb390 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -118,6 +118,8 @@ struct cfq_queue {
 	sector_t last_request_pos;
 
 	pid_t pid;
+
+	struct cfq_queue *new_cfqq;
 };
 
 /*
@@ -1047,6 +1049,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 	if (!cfqq)
 		return NULL;
 
+	/*
+	 * It only makes sense to merge sync queues.
+	 */
+	if (!cfq_cfqq_sync(cfqq))
+		return NULL;
+
 	if (cfq_cfqq_coop(cfqq))
 		return NULL;
 
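
Note the new guard: only sync queues are candidates for merging. Async
queues in CFQ are already shared across processes per priority level (the
async_cfqq arrays in struct cfq_data), so there is no per-process async
queue whose structure could usefully be merged.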
@@ -1168,6 +1176,43 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 }
 
 /*
+ * Must be called with the queue_lock held.
+ */
+static int cfqq_process_refs(struct cfq_queue *cfqq)
+{
+	int process_refs, io_refs;
+
+	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
+	process_refs = atomic_read(&cfqq->ref) - io_refs;
+	BUG_ON(process_refs < 0);
+	return process_refs;
+}
+
+static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
+{
+	int process_refs;
+	struct cfq_queue *__cfqq;
+
+	/* Avoid a circular list and skip interim queue merges */
+	while ((__cfqq = new_cfqq->new_cfqq)) {
+		if (__cfqq == cfqq)
+			return;
+		new_cfqq = __cfqq;
+	}
+
+	process_refs = cfqq_process_refs(cfqq);
+	/*
+	 * If the process for the cfqq has gone away, there is no
+	 * sense in merging the queues.
+	 */
+	if (process_refs == 0)
+		return;
+
+	cfqq->new_cfqq = new_cfqq;
+	atomic_add(process_refs, &new_cfqq->ref);
+}
+
+/*
  * Select a queue for service. If we have a current active queue,
  * check whether to continue servicing it, or retrieve and set a new one.
  */
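
To make the reference accounting concrete, here is a minimal userspace
model of the two helpers above. It is an illustrative sketch, not kernel
code: struct queue, process_refs() and setup_merge() are hypothetical
stand-ins for struct cfq_queue, cfqq_process_refs() and cfq_setup_merge(),
plain ints stand in for atomics, and the queue_lock the kernel holds here
is omitted.

#include <assert.h>
#include <stdio.h>

struct queue {
	int ref;                /* total references: process refs + io refs */
	int allocated[2];       /* in-flight READ/WRITE requests (io refs) */
	struct queue *new_cfqq; /* merge target, the field this patch adds */
};

/* Mirrors cfqq_process_refs(): references not owned by in-flight requests. */
static int process_refs(struct queue *q)
{
	int io_refs = q->allocated[0] + q->allocated[1];

	assert(q->ref >= io_refs);
	return q->ref - io_refs;
}

/* Mirrors cfq_setup_merge(): collapse the chain, then transfer refs. */
static void setup_merge(struct queue *q, struct queue *target)
{
	struct queue *step;
	int prefs;

	/* Avoid a circular list and skip interim queue merges. */
	while ((step = target->new_cfqq)) {
		if (step == q)
			return;
		target = step;
	}

	prefs = process_refs(q);
	if (prefs == 0)         /* owning process already gone, nothing to do */
		return;

	q->new_cfqq = target;
	target->ref += prefs;   /* one ref on target per process holding q */
}

int main(void)
{
	struct queue a = { .ref = 2, .allocated = { 1, 0 } }; /* 1 process ref */
	struct queue b = { .ref = 1 };

	setup_merge(&a, &b);
	printf("b.ref = %d\n", b.ref); /* prints 2: b is now pinned for a */
	return 0;
}

The point of counting process references rather than raw references is
that every process holding the old queue must also end up holding the
merge target, so the target cannot disappear before the switch-over in
cfq_set_request() happens.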
@@ -1196,11 +1241,14 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 * If another queue has a request waiting within our mean seek
 	 * distance, let it run. The expire code will check for close
 	 * cooperators and put the close queue at the front of the service
-	 * tree.
+	 * tree. If possible, merge the expiring queue with the new cfqq.
 	 */
 	new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
-	if (new_cfqq)
+	if (new_cfqq) {
+		if (!cfqq->new_cfqq)
+			cfq_setup_merge(cfqq, new_cfqq);
 		goto expire;
+	}
 
 	/*
 	 * No requests pending. If the active queue still has requests in
@@ -1511,11 +1559,29 @@ static void cfq_free_io_context(struct io_context *ioc)
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	struct cfq_queue *__cfqq, *next;
+
 	if (unlikely(cfqq == cfqd->active_queue)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
 		cfq_schedule_dispatch(cfqd);
 	}
 
+	/*
+	 * If this queue was scheduled to merge with another queue, be
+	 * sure to drop the reference taken on that queue (and others in
+	 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
+	 */
+	__cfqq = cfqq->new_cfqq;
+	while (__cfqq) {
+		if (__cfqq == cfqq) {
+			WARN(1, "cfqq->new_cfqq loop detected\n");
+			break;
+		}
+		next = __cfqq->new_cfqq;
+		cfq_put_queue(__cfqq);
+		__cfqq = next;
+	}
+
 	cfq_put_queue(cfqq);
 }
 
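
Extending the userspace sketch from above (put_queue() and exit_queue()
are again hypothetical stand-ins, and the kernel's cfq_put_queue()
actually frees the queue on the last reference), the teardown side
mirrors the new loop in cfq_exit_cfqq(): the exiting process walks its
merge chain and gives back one reference per chain member, with a guard
against a corrupted, circular chain.

/* Stand-in for cfq_put_queue(): just counts the reference back down. */
static void put_queue(struct queue *q)
{
	assert(q->ref > 0);
	q->ref--;
}

/* Mirrors the teardown added to cfq_exit_cfqq(). */
static void exit_queue(struct queue *q)
{
	struct queue *step = q->new_cfqq, *next;

	while (step) {
		if (step == q) {        /* matches the WARN() in the patch */
			fprintf(stderr, "new_cfqq loop detected\n");
			break;
		}
		next = step->new_cfqq;
		put_queue(step);        /* ref taken by setup_merge() */
		step = next;
	}
	put_queue(q);
}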
@@ -2323,6 +2389,16 @@ static void cfq_put_request(struct request *rq)
 	}
 }
 
+static struct cfq_queue *
+cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
+		struct cfq_queue *cfqq)
+{
+	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
+	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
+	cfq_put_queue(cfqq);
+	return cic_to_cfqq(cic, 1);
+}
+
 /*
  * Allocate cfq data structures associated with this request.
  */
@@ -2349,6 +2425,15 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
 		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
 		cic_set_cfqq(cic, cfqq, is_sync);
+	} else {
+		/*
+		 * Check to see if this queue is scheduled to merge with
+		 * another, closely cooperating queue. The merging of
+		 * queues happens here as it must be done in process context.
+		 * The reference on new_cfqq was taken in merge_cfqqs.
+		 */
+		if (cfqq->new_cfqq)
+			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
 	}
 
 	cfqq->allocated[rw]++;
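
In the same spirit, the switch-over itself can be modeled as follows;
follow_merge() is a hypothetical stand-in for cfq_merge_cfqqs(), which in
the kernel also re-points the process's cfq_io_context with cic_set_cfqq()
and returns cic_to_cfqq(cic, 1). The merge is deferred to
cfq_set_request() because updating the io context must happen in process
context.

/* When the process next allocates a request, trade its reference on the
 * old queue for the one setup_merge() already took on the merge target. */
static struct queue *follow_merge(struct queue *q)
{
	struct queue *target = q->new_cfqq;

	put_queue(q);   /* drop this process's ref on the old queue */
	return target;  /* its ref on target was taken in setup_merge() */
}

After this point the cooperating processes issue requests against a
single shared cfq_queue, which is what turns the seeky, interleaved
pattern of read-test2 or multiple nfsd threads back into sequential I/O.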