Diffstat (limited to 'block/as-iosched.c')
-rw-r--r--  block/as-iosched.c  139
1 file changed, 3 insertions, 136 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index ad1cc4077819..6db494333c3a 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -14,7 +14,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
-#include <linux/hash.h>
 #include <linux/rbtree.h>
 #include <linux/interrupt.h>
 
@@ -95,7 +94,6 @@ struct as_data {
 
 	struct as_rq *next_arq[2];	/* next in sort order */
 	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
-	struct hlist_head *hash;	/* request hash */
 
 	unsigned long exit_prob;	/* probability a task will exit while
 					   being waited on */
@@ -162,11 +160,6 @@ struct as_rq {
 	struct io_context *io_context;	/* The submitting task */
 
 	/*
-	 * request hash, key is the ending offset (for back merge lookup)
-	 */
-	struct hlist_node hash;
-
-	/*
 	 * expire fifo
 	 */
 	struct list_head fifo;
@@ -273,77 +266,6 @@ static void as_put_io_context(struct as_rq *arq)
 }
 
 /*
- * the back merge hash support functions
- */
-static const int as_hash_shift = 6;
-#define AS_HASH_BLOCK(sec)	((sec) >> 3)
-#define AS_HASH_FN(sec)		(hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
-#define AS_HASH_ENTRIES		(1 << as_hash_shift)
-#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-
-static inline void __as_del_arq_hash(struct as_rq *arq)
-{
-	hlist_del_init(&arq->hash);
-}
-
-static inline void as_del_arq_hash(struct as_rq *arq)
-{
-	if (!hlist_unhashed(&arq->hash))
-		__as_del_arq_hash(arq);
-}
-
-static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
-{
-	struct request *rq = arq->request;
-
-	BUG_ON(!hlist_unhashed(&arq->hash));
-
-	hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
-}
-
-/*
- * move hot entry to front of chain
- */
-static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
-{
-	struct request *rq = arq->request;
-	struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
-
-	if (hlist_unhashed(&arq->hash)) {
-		WARN_ON(1);
-		return;
-	}
-
-	if (&arq->hash != head->first) {
-		hlist_del(&arq->hash);
-		hlist_add_head(&arq->hash, head);
-	}
-}
-
-static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
-{
-	struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
-	struct hlist_node *entry, *next;
-	struct as_rq *arq;
-
-	hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
-		struct request *__rq = arq->request;
-
-		BUG_ON(hlist_unhashed(&arq->hash));
-
-		if (!rq_mergeable(__rq)) {
-			as_del_arq_hash(arq);
-			continue;
-		}
-
-		if (rq_hash_key(__rq) == offset)
-			return __rq;
-	}
-
-	return NULL;
-}
-
-/*
  * rb tree support functions
  */
 #define rb_entry_arq(node)	rb_entry((node), struct as_rq, rb_node)
@@ -1060,7 +982,6 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
 	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
 
 	list_del_init(&arq->fifo);
-	as_del_arq_hash(arq);
 	as_del_arq_rb(ad, arq);
 }
 
@@ -1349,8 +1270,6 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	}
 
 	as_add_arq_rb(ad, arq);
-	if (rq_mergeable(arq->request))
-		as_add_arq_hash(ad, arq);
 
 	/*
 	 * set expire time (only used for reads) and add to fifo list
@@ -1428,42 +1347,17 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	struct as_data *ad = q->elevator->elevator_data;
 	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
 	struct request *__rq;
-	int ret;
-
-	/*
-	 * see if the merge hash can satisfy a back merge
-	 */
-	__rq = as_find_arq_hash(ad, bio->bi_sector);
-	if (__rq) {
-		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
-		if (elv_rq_merge_ok(__rq, bio)) {
-			ret = ELEVATOR_BACK_MERGE;
-			goto out;
-		}
-	}
 
 	/*
 	 * check for front merge
 	 */
 	__rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
-	if (__rq) {
-		BUG_ON(rb_key != rq_rb_key(__rq));
-
-		if (elv_rq_merge_ok(__rq, bio)) {
-			ret = ELEVATOR_FRONT_MERGE;
-			goto out;
-		}
+	if (__rq && elv_rq_merge_ok(__rq, bio)) {
+		*req = __rq;
+		return ELEVATOR_FRONT_MERGE;
 	}
 
 	return ELEVATOR_NO_MERGE;
-out:
-	if (ret) {
-		if (rq_mergeable(__rq))
-			as_hot_arq_hash(ad, RQ_DATA(__rq));
-	}
-	*req = __rq;
-	return ret;
 }
 
 static void as_merged_request(request_queue_t *q, struct request *req)
@@ -1472,12 +1366,6 @@ static void as_merged_request(request_queue_t *q, struct request *req)
 	struct as_rq *arq = RQ_DATA(req);
 
 	/*
-	 * hash always needs to be repositioned, key is end sector
-	 */
-	as_del_arq_hash(arq);
-	as_add_arq_hash(ad, arq);
-
-	/*
 	 * if the merge was a front merge, we need to reposition request
 	 */
 	if (rq_rb_key(req) != arq->rb_key) {
@@ -1501,13 +1389,6 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
 	BUG_ON(!arq);
 	BUG_ON(!anext);
 
-	/*
-	 * reposition arq (this is the merged request) in hash, and in rbtree
-	 * in case of a front merge
-	 */
-	as_del_arq_hash(arq);
-	as_add_arq_hash(ad, arq);
-
 	if (rq_rb_key(req) != arq->rb_key) {
 		as_del_arq_rb(ad, arq);
 		as_add_arq_rb(ad, arq);
@@ -1591,7 +1472,6 @@ static int as_set_request(request_queue_t *q, struct request *rq,
 		arq->request = rq;
 		arq->state = AS_RQ_PRESCHED;
 		arq->io_context = NULL;
-		INIT_HLIST_NODE(&arq->hash);
 		INIT_LIST_HEAD(&arq->fifo);
 		rq->elevator_private = arq;
 		return 0;
@@ -1628,7 +1508,6 @@ static void as_exit_queue(elevator_t *e)
 
 	mempool_destroy(ad->arq_pool);
 	put_io_context(ad->io_context);
-	kfree(ad->hash);
 	kfree(ad);
 }
 
@@ -1639,7 +1518,6 @@ static void as_exit_queue(elevator_t *e)
 static void *as_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct as_data *ad;
-	int i;
 
 	if (!arq_pool)
 		return NULL;
@@ -1651,17 +1529,9 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
 
 	ad->q = q; /* Identify what queue the data belongs to */
 
-	ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES,
-				GFP_KERNEL, q->node);
-	if (!ad->hash) {
-		kfree(ad);
-		return NULL;
-	}
-
 	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 					   mempool_free_slab, arq_pool, q->node);
 	if (!ad->arq_pool) {
-		kfree(ad->hash);
 		kfree(ad);
 		return NULL;
 	}
@@ -1672,9 +1542,6 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
 	init_timer(&ad->antic_timer);
 	INIT_WORK(&ad->antic_work, as_work_handler, q);
 
-	for (i = 0; i < AS_HASH_ENTRIES; i++)
-		INIT_HLIST_HEAD(&ad->hash[i]);
-
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
 	ad->sort_list[REQ_SYNC] = RB_ROOT;