author     Tejun Heo <htejun@gmail.com>                2005-11-11 08:27:09 -0500
committer  Jens Axboe <axboe@nelson.home.kernel.dk>    2006-01-06 03:39:03 -0500
commit     ef9be1d336378de279d4e37779f1b83cebadbcc0
tree       decd8ee4c74ea8c30de9eca3a591bb0962ccd15f /block/as-iosched.c
parent     9f155b9802bb7049cd0f216c3fe903b58620df11
[BLOCK] as-iosched: update alias handling
Unlike other ioscheds, as-iosched handles aliases by chaining them
on rq->queuelist. As aliased requests are very rare in the first
place, this complicates merge/dispatch handling without meaningful
performance improvement. This patch updates as-iosched to dump
aliased requests into the dispatch queue as other ioscheds do.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'block/as-iosched.c')
-rw-r--r--   block/as-iosched.c   144
1 file changed, 25 insertions, 119 deletions
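The core of the change is easiest to read in isolation. The renamed
__as_add_arq_rb() keeps the old insert-or-return-alias behaviour, while a
new wrapper (reproduced from the @@ -397,6 +400,16 @@ hunk below, with
explanatory comments added here) retries the insertion, pushing each
colliding alias straight to the dispatch queue instead of chaining it on
rq->queuelist:

static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
        struct as_rq *alias;

        /*
         * __as_add_arq_rb() returns the arq already occupying this
         * sector in the rbtree (an alias), or NULL once the insert
         * succeeds.  Aliases are rare, hence the unlikely().
         */
        while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
                /* hand the displaced alias straight to the dispatch queue */
                as_move_to_dispatch(ad, alias);
                /* dispatching invalidates any anticipation in progress */
                as_antic_stop(ad);
        }
}

With aliases dispatched at insertion time, callers such as as_add_request(),
as_merged_request() and as_merged_requests() no longer need the
alias/next_arq bookkeeping deleted below.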
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 43fa20495688..8da3cf66894c 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -182,6 +182,9 @@ struct as_rq {
 
 static kmem_cache_t *arq_pool;
 
+static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
+static void as_antic_stop(struct as_data *ad);
+
 /*
  * IO Context helper functions
  */
@@ -370,7 +373,7 @@ static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
  * existing request against the same sector), which can happen when using
  * direct IO, then return the alias.
  */
-static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
+static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
 {
         struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
         struct rb_node *parent = NULL;
@@ -397,6 +400,16 @@ static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
         return NULL;
 }
 
+static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
+{
+        struct as_rq *alias;
+
+        while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
+                as_move_to_dispatch(ad, alias);
+                as_antic_stop(ad);
+        }
+}
+
 static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
 {
         if (!ON_RB(&arq->rb_node)) {
@@ -1133,23 +1146,6 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
         /*
          * take it off the sort and fifo list, add to dispatch queue
          */
-        while (!list_empty(&rq->queuelist)) {
-                struct request *__rq = list_entry_rq(rq->queuelist.next);
-                struct as_rq *__arq = RQ_DATA(__rq);
-
-                list_del(&__rq->queuelist);
-
-                elv_dispatch_add_tail(ad->q, __rq);
-
-                if (__arq->io_context && __arq->io_context->aic)
-                        atomic_inc(&__arq->io_context->aic->nr_dispatched);
-
-                WARN_ON(__arq->state != AS_RQ_QUEUED);
-                __arq->state = AS_RQ_DISPATCHED;
-
-                ad->nr_dispatched++;
-        }
-
         as_remove_queued_request(ad->q, rq);
         WARN_ON(arq->state != AS_RQ_QUEUED);
 
@@ -1326,49 +1322,12 @@ fifo_expired:
 }
 
 /*
- * Add arq to a list behind alias
- */
-static inline void
-as_add_aliased_request(struct as_data *ad, struct as_rq *arq,
-                        struct as_rq *alias)
-{
-        struct request *req = arq->request;
-        struct list_head *insert = alias->request->queuelist.prev;
-
-        /*
-         * Transfer list of aliases
-         */
-        while (!list_empty(&req->queuelist)) {
-                struct request *__rq = list_entry_rq(req->queuelist.next);
-                struct as_rq *__arq = RQ_DATA(__rq);
-
-                list_move_tail(&__rq->queuelist, &alias->request->queuelist);
-
-                WARN_ON(__arq->state != AS_RQ_QUEUED);
-        }
-
-        /*
-         * Another request with the same start sector on the rbtree.
-         * Link this request to that sector. They are untangled in
-         * as_move_to_dispatch
-         */
-        list_add(&arq->request->queuelist, insert);
-
-        /*
-         * Don't want to have to handle merges.
-         */
-        as_del_arq_hash(arq);
-        arq->request->flags |= REQ_NOMERGE;
-}
-
-/*
  * add arq to rbtree and fifo
  */
 static void as_add_request(request_queue_t *q, struct request *rq)
 {
         struct as_data *ad = q->elevator->elevator_data;
         struct as_rq *arq = RQ_DATA(rq);
-        struct as_rq *alias;
         int data_dir;
 
         arq->state = AS_RQ_NEW;
@@ -1387,33 +1346,17 @@ static void as_add_request(request_queue_t *q, struct request *rq)
                 atomic_inc(&arq->io_context->aic->nr_queued);
         }
 
-        alias = as_add_arq_rb(ad, arq);
-        if (!alias) {
-                /*
-                 * set expire time (only used for reads) and add to fifo list
-                 */
-                arq->expires = jiffies + ad->fifo_expire[data_dir];
-                list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
+        as_add_arq_rb(ad, arq);
+        if (rq_mergeable(arq->request))
+                as_add_arq_hash(ad, arq);
 
-                if (rq_mergeable(arq->request))
-                        as_add_arq_hash(ad, arq);
-                as_update_arq(ad, arq); /* keep state machine up to date */
-
-        } else {
-                as_add_aliased_request(ad, arq, alias);
-
-                /*
-                 * have we been anticipating this request?
-                 * or does it come from the same process as the one we are
-                 * anticipating for?
-                 */
-                if (ad->antic_status == ANTIC_WAIT_REQ
-                                || ad->antic_status == ANTIC_WAIT_NEXT) {
-                        if (as_can_break_anticipation(ad, arq))
-                                as_antic_stop(ad);
-                }
-        }
+        /*
+         * set expire time (only used for reads) and add to fifo list
+         */
+        arq->expires = jiffies + ad->fifo_expire[data_dir];
+        list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
 
+        as_update_arq(ad, arq); /* keep state machine up to date */
         arq->state = AS_RQ_QUEUED;
 }
 
@@ -1536,23 +1479,8 @@ static void as_merged_request(request_queue_t *q, struct request *req)
          * if the merge was a front merge, we need to reposition request
          */
         if (rq_rb_key(req) != arq->rb_key) {
-                struct as_rq *alias, *next_arq = NULL;
-
-                if (ad->next_arq[arq->is_sync] == arq)
-                        next_arq = as_find_next_arq(ad, arq);
-
-                /*
-                 * Note! We should really be moving any old aliased requests
-                 * off this request and try to insert them into the rbtree. We
-                 * currently don't bother. Ditto the next function.
-                 */
                 as_del_arq_rb(ad, arq);
-                if ((alias = as_add_arq_rb(ad, arq))) {
-                        list_del_init(&arq->fifo);
-                        as_add_aliased_request(ad, arq, alias);
-                        if (next_arq)
-                                ad->next_arq[arq->is_sync] = next_arq;
-                }
+                as_add_arq_rb(ad, arq);
                 /*
                  * Note! At this stage of this and the next function, our next
                  * request may not be optimal - eg the request may have "grown"
@@ -1579,18 +1507,8 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
                 as_add_arq_hash(ad, arq);
 
         if (rq_rb_key(req) != arq->rb_key) {
-                struct as_rq *alias, *next_arq = NULL;
-
-                if (ad->next_arq[arq->is_sync] == arq)
-                        next_arq = as_find_next_arq(ad, arq);
-
                 as_del_arq_rb(ad, arq);
-                if ((alias = as_add_arq_rb(ad, arq))) {
-                        list_del_init(&arq->fifo);
-                        as_add_aliased_request(ad, arq, alias);
-                        if (next_arq)
-                                ad->next_arq[arq->is_sync] = next_arq;
-                }
+                as_add_arq_rb(ad, arq);
         }
 
         /*
@@ -1610,18 +1528,6 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
         }
 
         /*
-         * Transfer list of aliases
-         */
-        while (!list_empty(&next->queuelist)) {
-                struct request *__rq = list_entry_rq(next->queuelist.next);
-                struct as_rq *__arq = RQ_DATA(__rq);
-
-                list_move_tail(&__rq->queuelist, &req->queuelist);
-
-                WARN_ON(__arq->state != AS_RQ_QUEUED);
-        }
-
-        /*
          * kill knowledge of next, this one is a goner
          */
         as_remove_queued_request(q, next);