author		Jens Axboe <axboe@suse.de>	2006-07-28 03:23:08 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2006-09-30 14:26:56 -0400
commit		9817064b68fef7e4580c6df1ea597e106b9ff88b
tree		76c27990626247613e9efa45b792d51ad79635d7 /block
parent		4aff5e2333c9a1609662f2091f55c3f6fffdad36
[PATCH] elevator: move the backmerging logic into the elevator core
Right now, every IO scheduler implements its own backmerging (except for
noop, which does no merging). That results in duplicated code for
essentially the same operation, which is never a good thing. This patch
moves the backmerging out of the io schedulers and into the elevator
core. We save 1.6kb of text and as a bonus get backmerging for noop as
well. Win-win!

Signed-off-by: Jens Axboe <axboe@suse.de>
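For context, the scheme being consolidated keys each mergeable request by its end sector (rq->sector + rq->nr_sectors): a bio that starts exactly where a queued request ends is a back-merge candidate and can be found with a single hash lookup. The following user-space sketch illustrates that lookup only; it substitutes a plain singly linked chain and a modulo bucket function for the hlist and hash_long() helpers the patch actually uses, and the struct request stand-in is a simplified, hypothetical one.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct request (illustration only). */
struct request {
	unsigned long long sector;	/* start sector */
	unsigned long long nr_sectors;	/* size in sectors */
	struct request *hash_next;	/* bucket chain (the kernel uses hlist) */
};

#define ELV_HASH_ENTRIES	64
/* Requests within the same 4KB block (8 sectors) share a bucket. */
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
/* The kernel hashes with hash_long(); a modulo keeps the sketch simple. */
#define ELV_HASH_FN(sec)	(ELV_HASH_BLOCK(sec) % ELV_HASH_ENTRIES)
/* Hash key is the END sector: a back merge appends a bio at rq's end. */
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)

static struct request *hash[ELV_HASH_ENTRIES];

static void rqhash_add(struct request *rq)
{
	struct request **head = &hash[ELV_HASH_FN(rq_hash_key(rq))];

	rq->hash_next = *head;
	*head = rq;
}

/* Find a queued request whose end sector equals the bio's start sector. */
static struct request *rqhash_find(unsigned long long bio_start)
{
	struct request *rq;

	for (rq = hash[ELV_HASH_FN(bio_start)]; rq; rq = rq->hash_next)
		if (rq_hash_key(rq) == bio_start)
			return rq;

	return NULL;
}

int main(void)
{
	struct request rq = { .sector = 100, .nr_sectors = 8, .hash_next = NULL };

	rqhash_add(&rq);
	/* A bio starting at sector 108 (= 100 + 8) can be back-merged. */
	printf("backmerge candidate: %s\n", rqhash_find(108) ? "found" : "none");
	return 0;
}

Because the key is the end sector, only back merges are served by the hash; front merges still go through each scheduler's sorted rb-tree lookup, which is why the per-scheduler merge functions below keep their front-merge paths.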
Diffstat (limited to 'block')
-rw-r--r--	block/as-iosched.c	139
-rw-r--r--	block/cfq-iosched.c	86
-rw-r--r--	block/deadline-iosched.c	128
-rw-r--r--	block/elevator.c	147
-rw-r--r--	block/ll_rw_blk.c	2
5 files changed, 142 insertions(+), 360 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index ad1cc4077819..6db494333c3a 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -14,7 +14,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
-#include <linux/hash.h>
 #include <linux/rbtree.h>
 #include <linux/interrupt.h>
 
@@ -95,7 +94,6 @@ struct as_data {
 
 	struct as_rq *next_arq[2];	/* next in sort order */
 	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
-	struct hlist_head *hash;	/* request hash */
 
 	unsigned long exit_prob;	/* probability a task will exit while
 					   being waited on */
@@ -162,11 +160,6 @@ struct as_rq {
 	struct io_context *io_context;	/* The submitting task */
 
 	/*
-	 * request hash, key is the ending offset (for back merge lookup)
-	 */
-	struct hlist_node hash;
-
-	/*
 	 * expire fifo
 	 */
 	struct list_head fifo;
@@ -273,77 +266,6 @@ static void as_put_io_context(struct as_rq *arq)
 }
 
 /*
- * the back merge hash support functions
- */
-static const int as_hash_shift = 6;
-#define AS_HASH_BLOCK(sec)	((sec) >> 3)
-#define AS_HASH_FN(sec)		(hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
-#define AS_HASH_ENTRIES		(1 << as_hash_shift)
-#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-
-static inline void __as_del_arq_hash(struct as_rq *arq)
-{
-	hlist_del_init(&arq->hash);
-}
-
-static inline void as_del_arq_hash(struct as_rq *arq)
-{
-	if (!hlist_unhashed(&arq->hash))
-		__as_del_arq_hash(arq);
-}
-
-static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
-{
-	struct request *rq = arq->request;
-
-	BUG_ON(!hlist_unhashed(&arq->hash));
-
-	hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
-}
-
-/*
- * move hot entry to front of chain
- */
-static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
-{
-	struct request *rq = arq->request;
-	struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
-
-	if (hlist_unhashed(&arq->hash)) {
-		WARN_ON(1);
-		return;
-	}
-
-	if (&arq->hash != head->first) {
-		hlist_del(&arq->hash);
-		hlist_add_head(&arq->hash, head);
-	}
-}
-
-static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
-{
-	struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
-	struct hlist_node *entry, *next;
-	struct as_rq *arq;
-
-	hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
-		struct request *__rq = arq->request;
-
-		BUG_ON(hlist_unhashed(&arq->hash));
-
-		if (!rq_mergeable(__rq)) {
-			as_del_arq_hash(arq);
-			continue;
-		}
-
-		if (rq_hash_key(__rq) == offset)
-			return __rq;
-	}
-
-	return NULL;
-}
-
-/*
  * rb tree support functions
  */
 #define rb_entry_arq(node)	rb_entry((node), struct as_rq, rb_node)
@@ -1060,7 +982,6 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
 	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
 
 	list_del_init(&arq->fifo);
-	as_del_arq_hash(arq);
 	as_del_arq_rb(ad, arq);
 }
 
@@ -1349,8 +1270,6 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	}
 
 	as_add_arq_rb(ad, arq);
-	if (rq_mergeable(arq->request))
-		as_add_arq_hash(ad, arq);
 
 	/*
 	 * set expire time (only used for reads) and add to fifo list
@@ -1428,42 +1347,17 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	struct as_data *ad = q->elevator->elevator_data;
 	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
 	struct request *__rq;
-	int ret;
-
-	/*
-	 * see if the merge hash can satisfy a back merge
-	 */
-	__rq = as_find_arq_hash(ad, bio->bi_sector);
-	if (__rq) {
-		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
-		if (elv_rq_merge_ok(__rq, bio)) {
-			ret = ELEVATOR_BACK_MERGE;
-			goto out;
-		}
-	}
 
 	/*
 	 * check for front merge
 	 */
 	__rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
-	if (__rq) {
-		BUG_ON(rb_key != rq_rb_key(__rq));
-
-		if (elv_rq_merge_ok(__rq, bio)) {
-			ret = ELEVATOR_FRONT_MERGE;
-			goto out;
-		}
+	if (__rq && elv_rq_merge_ok(__rq, bio)) {
+		*req = __rq;
+		return ELEVATOR_FRONT_MERGE;
 	}
 
 	return ELEVATOR_NO_MERGE;
-out:
-	if (ret) {
-		if (rq_mergeable(__rq))
-			as_hot_arq_hash(ad, RQ_DATA(__rq));
-	}
-	*req = __rq;
-	return ret;
 }
 
 static void as_merged_request(request_queue_t *q, struct request *req)
@@ -1472,12 +1366,6 @@ static void as_merged_request(request_queue_t *q, struct request *req)
 	struct as_rq *arq = RQ_DATA(req);
 
 	/*
-	 * hash always needs to be repositioned, key is end sector
-	 */
-	as_del_arq_hash(arq);
-	as_add_arq_hash(ad, arq);
-
-	/*
 	 * if the merge was a front merge, we need to reposition request
 	 */
 	if (rq_rb_key(req) != arq->rb_key) {
@@ -1501,13 +1389,6 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
 	BUG_ON(!arq);
 	BUG_ON(!anext);
 
-	/*
-	 * reposition arq (this is the merged request) in hash, and in rbtree
-	 * in case of a front merge
-	 */
-	as_del_arq_hash(arq);
-	as_add_arq_hash(ad, arq);
-
 	if (rq_rb_key(req) != arq->rb_key) {
 		as_del_arq_rb(ad, arq);
 		as_add_arq_rb(ad, arq);
@@ -1591,7 +1472,6 @@ static int as_set_request(request_queue_t *q, struct request *rq,
 	arq->request = rq;
 	arq->state = AS_RQ_PRESCHED;
 	arq->io_context = NULL;
-	INIT_HLIST_NODE(&arq->hash);
 	INIT_LIST_HEAD(&arq->fifo);
 	rq->elevator_private = arq;
 	return 0;
@@ -1628,7 +1508,6 @@ static void as_exit_queue(elevator_t *e)
 
 	mempool_destroy(ad->arq_pool);
 	put_io_context(ad->io_context);
-	kfree(ad->hash);
 	kfree(ad);
 }
 
@@ -1639,7 +1518,6 @@ static void as_exit_queue(elevator_t *e)
 static void *as_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct as_data *ad;
-	int i;
 
 	if (!arq_pool)
 		return NULL;
@@ -1651,17 +1529,9 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
 
 	ad->q = q; /* Identify what queue the data belongs to */
 
-	ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES,
-				GFP_KERNEL, q->node);
-	if (!ad->hash) {
-		kfree(ad);
-		return NULL;
-	}
-
 	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 					   mempool_free_slab, arq_pool, q->node);
 	if (!ad->arq_pool) {
-		kfree(ad->hash);
 		kfree(ad);
 		return NULL;
 	}
@@ -1672,9 +1542,6 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
 	init_timer(&ad->antic_timer);
 	INIT_WORK(&ad->antic_work, as_work_handler, q);
 
-	for (i = 0; i < AS_HASH_ENTRIES; i++)
-		INIT_HLIST_HEAD(&ad->hash[i]);
-
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
 	ad->sort_list[REQ_SYNC] = RB_ROOT;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3a3aee08ec5f..1b803c0c90f1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -41,16 +41,6 @@ static DEFINE_SPINLOCK(cfq_exit_lock);
 #define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
 #define list_entry_qhash(entry)	hlist_entry((entry), struct cfq_queue, cfq_hash)
 
-/*
- * for the hash of crq inside the cfqq
- */
-#define CFQ_MHASH_SHIFT		6
-#define CFQ_MHASH_BLOCK(sec)	((sec) >> 3)
-#define CFQ_MHASH_ENTRIES	(1 << CFQ_MHASH_SHIFT)
-#define CFQ_MHASH_FN(sec)	hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
-#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)	hlist_entry((ptr), struct cfq_rq, hash)
-
 #define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)
 #define list_entry_fifo(ptr)	list_entry((ptr), struct request, queuelist)
 
@@ -112,11 +102,6 @@ struct cfq_data {
 	 */
 	struct hlist_head *cfq_hash;
 
-	/*
-	 * global crq hash for all queues
-	 */
-	struct hlist_head *crq_hash;
-
 	mempool_t *crq_pool;
 
 	int rq_in_driver;
@@ -203,7 +188,6 @@ struct cfq_rq {
 	struct rb_node rb_node;
 	sector_t rb_key;
 	struct request *request;
-	struct hlist_node hash;
 
 	struct cfq_queue *cfq_queue;
 	struct cfq_io_context *io_context;
@@ -272,42 +256,6 @@ static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
 
 /*
- * lots of deadline iosched dupes, can be abstracted later...
- */
-static inline void cfq_del_crq_hash(struct cfq_rq *crq)
-{
-	hlist_del_init(&crq->hash);
-}
-
-static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
-{
-	const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
-
-	hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
-}
-
-static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
-{
-	struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
-	struct hlist_node *entry, *next;
-
-	hlist_for_each_safe(entry, next, hash_list) {
-		struct cfq_rq *crq = list_entry_hash(entry);
-		struct request *__rq = crq->request;
-
-		if (!rq_mergeable(__rq)) {
-			cfq_del_crq_hash(crq);
-			continue;
-		}
-
-		if (rq_hash_key(__rq) == offset)
-			return __rq;
-	}
-
-	return NULL;
-}
-
-/*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
@@ -677,7 +625,6 @@ static void cfq_remove_request(struct request *rq)
 
 	list_del_init(&rq->queuelist);
 	cfq_del_crq_rb(crq);
-	cfq_del_crq_hash(crq);
 }
 
 static int
@@ -685,34 +632,20 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct request *__rq;
-	int ret;
-
-	__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
-	if (__rq && elv_rq_merge_ok(__rq, bio)) {
-		ret = ELEVATOR_BACK_MERGE;
-		goto out;
-	}
 
 	__rq = cfq_find_rq_fmerge(cfqd, bio);
 	if (__rq && elv_rq_merge_ok(__rq, bio)) {
-		ret = ELEVATOR_FRONT_MERGE;
-		goto out;
+		*req = __rq;
+		return ELEVATOR_FRONT_MERGE;
 	}
 
 	return ELEVATOR_NO_MERGE;
-out:
-	*req = __rq;
-	return ret;
 }
 
 static void cfq_merged_request(request_queue_t *q, struct request *req)
 {
-	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_rq *crq = RQ_DATA(req);
 
-	cfq_del_crq_hash(crq);
-	cfq_add_crq_hash(cfqd, crq);
-
 	if (rq_rb_key(req) != crq->rb_key) {
 		struct cfq_queue *cfqq = crq->cfq_queue;
 
@@ -1825,9 +1758,6 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
 
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 
-	if (rq_mergeable(rq))
-		cfq_add_crq_hash(cfqd, crq);
-
 	cfq_crq_enqueued(cfqd, cfqq, crq);
 }
 
@@ -2055,7 +1985,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 		RB_CLEAR_NODE(&crq->rb_node);
 		crq->rb_key = 0;
 		crq->request = rq;
-		INIT_HLIST_NODE(&crq->hash);
 		crq->cfq_queue = cfqq;
 		crq->io_context = cic;
 
@@ -2221,7 +2150,6 @@ static void cfq_exit_queue(elevator_t *e)
 	cfq_shutdown_timer_wq(cfqd);
 
 	mempool_destroy(cfqd->crq_pool);
-	kfree(cfqd->crq_hash);
 	kfree(cfqd->cfq_hash);
 	kfree(cfqd);
 }
@@ -2246,20 +2174,14 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&cfqd->empty_list);
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
-	cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
-	if (!cfqd->crq_hash)
-		goto out_crqhash;
-
 	cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
 	if (!cfqd->cfq_hash)
-		goto out_cfqhash;
+		goto out_crqhash;
 
 	cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
 	if (!cfqd->crq_pool)
 		goto out_crqpool;
 
-	for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
-		INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
 	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
 		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
 
@@ -2289,8 +2211,6 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	return cfqd;
 out_crqpool:
 	kfree(cfqd->cfq_hash);
-out_cfqhash:
-	kfree(cfqd->crq_hash);
 out_crqhash:
 	kfree(cfqd);
 	return NULL;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c7ca9f0b6498..b66e820f544d 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -12,7 +12,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
-#include <linux/hash.h>
 #include <linux/rbtree.h>
 
 /*
@@ -24,13 +23,6 @@ static const int writes_starved = 2; /* max times reads can starve a write */
 static const int fifo_batch = 16;	/* # of sequential requests treated as one
 					   by the above parameters. For throughput. */
 
-static const int deadline_hash_shift = 5;
-#define DL_HASH_BLOCK(sec)	((sec) >> 3)
-#define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
-#define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
-#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define ON_HASH(drq)		(!hlist_unhashed(&(drq)->hash))
-
 struct deadline_data {
 	/*
 	 * run time data
@@ -46,7 +38,6 @@ struct deadline_data {
 	 * next in sort order. read, write or both are NULL
 	 */
 	struct deadline_rq *next_drq[2];
-	struct hlist_head *hash;	/* request hash */
 	unsigned int batching;		/* number of sequential requests made */
 	sector_t last_sector;		/* head position */
 	unsigned int starved;		/* times reads have starved writes */
@@ -75,11 +66,6 @@ struct deadline_rq {
 	struct request *request;
 
 	/*
-	 * request hash, key is the ending offset (for back merge lookup)
-	 */
-	struct hlist_node hash;
-
-	/*
 	 * expire fifo
 	 */
 	struct list_head fifo;
@@ -93,69 +79,6 @@ static kmem_cache_t *drq_pool;
 #define RQ_DATA(rq)	((struct deadline_rq *) (rq)->elevator_private)
 
 /*
- * the back merge hash support functions
- */
-static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
-{
-	hlist_del_init(&drq->hash);
-}
-
-static inline void deadline_del_drq_hash(struct deadline_rq *drq)
-{
-	if (ON_HASH(drq))
-		__deadline_del_drq_hash(drq);
-}
-
-static inline void
-deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
-	struct request *rq = drq->request;
-
-	BUG_ON(ON_HASH(drq));
-
-	hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
-}
-
-/*
- * move hot entry to front of chain
- */
-static inline void
-deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
-	struct request *rq = drq->request;
-	struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
-
-	if (ON_HASH(drq) && &drq->hash != head->first) {
-		hlist_del(&drq->hash);
-		hlist_add_head(&drq->hash, head);
-	}
-}
-
-static struct request *
-deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
-{
-	struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
-	struct hlist_node *entry, *next;
-	struct deadline_rq *drq;
-
-	hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
-		struct request *__rq = drq->request;
-
-		BUG_ON(!ON_HASH(drq));
-
-		if (!rq_mergeable(__rq)) {
-			__deadline_del_drq_hash(drq);
-			continue;
-		}
-
-		if (rq_hash_key(__rq) == offset)
-			return __rq;
-	}
-
-	return NULL;
-}
-
-/*
  * rb tree support functions
  */
 #define rb_entry_drq(node)	rb_entry((node), struct deadline_rq, rb_node)
@@ -267,22 +190,19 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct deadline_rq *drq = RQ_DATA(rq);
-
 	const int data_dir = rq_data_dir(drq->request);
 
 	deadline_add_drq_rb(dd, drq);
+
 	/*
 	 * set expire time (only used for reads) and add to fifo list
 	 */
 	drq->expires = jiffies + dd->fifo_expire[data_dir];
 	list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
-
-	if (rq_mergeable(rq))
-		deadline_add_drq_hash(dd, drq);
 }
 
 /*
- * remove rq from rbtree, fifo, and hash
+ * remove rq from rbtree and fifo.
  */
 static void deadline_remove_request(request_queue_t *q, struct request *rq)
 {
@@ -291,7 +211,6 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
 
 	list_del_init(&drq->fifo);
 	deadline_del_drq_rb(dd, drq);
-	deadline_del_drq_hash(drq);
 }
 
 static int
@@ -302,19 +221,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	int ret;
 
 	/*
-	 * see if the merge hash can satisfy a back merge
-	 */
-	__rq = deadline_find_drq_hash(dd, bio->bi_sector);
-	if (__rq) {
-		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
-		if (elv_rq_merge_ok(__rq, bio)) {
-			ret = ELEVATOR_BACK_MERGE;
-			goto out;
-		}
-	}
-
-	/*
 	 * check for front merge
 	 */
 	if (dd->front_merges) {
@@ -333,8 +239,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
 	return ELEVATOR_NO_MERGE;
 out:
-	if (ret)
-		deadline_hot_drq_hash(dd, RQ_DATA(__rq));
 	*req = __rq;
 	return ret;
 }
@@ -345,12 +249,6 @@ static void deadline_merged_request(request_queue_t *q, struct request *req)
 	struct deadline_rq *drq = RQ_DATA(req);
 
 	/*
-	 * hash always needs to be repositioned, key is end sector
-	 */
-	deadline_del_drq_hash(drq);
-	deadline_add_drq_hash(dd, drq);
-
-	/*
 	 * if the merge was a front merge, we need to reposition request
 	 */
 	if (rq_rb_key(req) != drq->rb_key) {
@@ -370,13 +268,6 @@ deadline_merged_requests(request_queue_t *q, struct request *req,
 	BUG_ON(!drq);
 	BUG_ON(!dnext);
 
-	/*
-	 * reposition drq (this is the merged request) in hash, and in rbtree
-	 * in case of a front merge
-	 */
-	deadline_del_drq_hash(drq);
-	deadline_add_drq_hash(dd, drq);
-
 	if (rq_rb_key(req) != drq->rb_key) {
 		deadline_del_drq_rb(dd, drq);
 		deadline_add_drq_rb(dd, drq);
@@ -594,7 +485,6 @@ static void deadline_exit_queue(elevator_t *e)
 	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
 
 	mempool_destroy(dd->drq_pool);
-	kfree(dd->hash);
 	kfree(dd);
 }
 
@@ -605,7 +495,6 @@ static void deadline_exit_queue(elevator_t *e)
 static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct deadline_data *dd;
-	int i;
 
 	if (!drq_pool)
 		return NULL;
@@ -615,24 +504,13 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 		return NULL;
 	memset(dd, 0, sizeof(*dd));
 
-	dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
-				GFP_KERNEL, q->node);
-	if (!dd->hash) {
-		kfree(dd);
-		return NULL;
-	}
-
 	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 					   mempool_free_slab, drq_pool, q->node);
 	if (!dd->drq_pool) {
-		kfree(dd->hash);
 		kfree(dd);
 		return NULL;
 	}
 
-	for (i = 0; i < DL_HASH_ENTRIES; i++)
-		INIT_HLIST_HEAD(&dd->hash[i]);
-
 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
 	dd->sort_list[READ] = RB_ROOT;
@@ -667,8 +545,6 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	RB_CLEAR_NODE(&drq->rb_node);
 	drq->request = rq;
 
-	INIT_HLIST_NODE(&drq->hash);
-
 	INIT_LIST_HEAD(&drq->fifo);
 
 	rq->elevator_private = drq;
diff --git a/block/elevator.c b/block/elevator.c
index 4ac97b642042..cff1102dac9d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -33,6 +33,7 @@
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/blktrace_api.h>
+#include <linux/hash.h>
 
 #include <asm/uaccess.h>
 
@@ -40,6 +41,16 @@ static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
 
 /*
+ * Merge hash stuff.
+ */
+static const int elv_hash_shift = 6;
+#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
+#define ELV_HASH_FN(sec)	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
+#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
+#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
+#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
+
+/*
  * can we safely merge with this request?
  */
 inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -153,25 +164,41 @@ static struct kobj_type elv_ktype;
 
 static elevator_t *elevator_alloc(struct elevator_type *e)
 {
-	elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
-	if (eq) {
-		memset(eq, 0, sizeof(*eq));
-		eq->ops = &e->ops;
-		eq->elevator_type = e;
-		kobject_init(&eq->kobj);
-		snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
-		eq->kobj.ktype = &elv_ktype;
-		mutex_init(&eq->sysfs_lock);
-	} else {
-		elevator_put(e);
-	}
+	elevator_t *eq;
+	int i;
+
+	eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+	if (unlikely(!eq))
+		goto err;
+
+	memset(eq, 0, sizeof(*eq));
+	eq->ops = &e->ops;
+	eq->elevator_type = e;
+	kobject_init(&eq->kobj);
+	snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
+	eq->kobj.ktype = &elv_ktype;
+	mutex_init(&eq->sysfs_lock);
+
+	eq->hash = kmalloc(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, GFP_KERNEL);
+	if (!eq->hash)
+		goto err;
+
+	for (i = 0; i < ELV_HASH_ENTRIES; i++)
+		INIT_HLIST_HEAD(&eq->hash[i]);
+
 	return eq;
+err:
+	kfree(eq);
+	elevator_put(e);
+	return NULL;
 }
 
 static void elevator_release(struct kobject *kobj)
 {
 	elevator_t *e = container_of(kobj, elevator_t, kobj);
+
 	elevator_put(e->elevator_type);
+	kfree(e->hash);
 	kfree(e);
 }
 
@@ -223,6 +250,53 @@ void elevator_exit(elevator_t *e)
 	kobject_put(&e->kobj);
 }
 
+static inline void __elv_rqhash_del(struct request *rq)
+{
+	hlist_del_init(&rq->hash);
+}
+
+static void elv_rqhash_del(request_queue_t *q, struct request *rq)
+{
+	if (ELV_ON_HASH(rq))
+		__elv_rqhash_del(rq);
+}
+
+static void elv_rqhash_add(request_queue_t *q, struct request *rq)
+{
+	elevator_t *e = q->elevator;
+
+	BUG_ON(ELV_ON_HASH(rq));
+	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
+}
+
+static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
+{
+	__elv_rqhash_del(rq);
+	elv_rqhash_add(q, rq);
+}
+
+static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
+{
+	elevator_t *e = q->elevator;
+	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
+	struct hlist_node *entry, *next;
+	struct request *rq;
+
+	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+		BUG_ON(!ELV_ON_HASH(rq));
+
+		if (unlikely(!rq_mergeable(rq))) {
+			__elv_rqhash_del(rq);
+			continue;
+		}
+
+		if (rq_hash_key(rq) == offset)
+			return rq;
+	}
+
+	return NULL;
+}
+
 /*
  * Insert rq into dispatch queue of q.  Queue lock must be held on
  * entry.  If sort != 0, rq is sort-inserted; otherwise, rq will be
@@ -235,6 +309,9 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
+
+	elv_rqhash_del(q, rq);
+
 	q->nr_sorted--;
 
 	boundary = q->end_sector;
@@ -258,11 +335,32 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 	list_add(&rq->queuelist, entry);
 }
 
+/*
+ * This should be in elevator.h, but that requires pulling in rq and q
+ */
+void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
+{
+	if (q->last_merge == rq)
+		q->last_merge = NULL;
+
+	elv_rqhash_del(q, rq);
+
+	q->nr_sorted--;
+
+	q->end_sector = rq_end_sector(rq);
+	q->boundary_rq = rq;
+	list_add_tail(&rq->queuelist, &q->queue_head);
+}
+
 int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
 	elevator_t *e = q->elevator;
+	struct request *__rq;
 	int ret;
 
+	/*
+	 * First try one-hit cache.
+	 */
 	if (q->last_merge) {
 		ret = elv_try_merge(q->last_merge, bio);
 		if (ret != ELEVATOR_NO_MERGE) {
@@ -271,6 +369,15 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
 		}
 	}
 
+	/*
+	 * See if our hash lookup can find a potential backmerge.
+	 */
+	__rq = elv_rqhash_find(q, bio->bi_sector);
+	if (__rq && elv_rq_merge_ok(__rq, bio)) {
+		*req = __rq;
+		return ELEVATOR_BACK_MERGE;
+	}
+
 	if (e->ops->elevator_merge_fn)
 		return e->ops->elevator_merge_fn(q, req, bio);
 
@@ -284,6 +391,8 @@ void elv_merged_request(request_queue_t *q, struct request *rq)
 	if (e->ops->elevator_merged_fn)
 		e->ops->elevator_merged_fn(q, rq);
 
+	elv_rqhash_reposition(q, rq);
+
 	q->last_merge = rq;
 }
 
@@ -294,8 +403,11 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 
 	if (e->ops->elevator_merge_req_fn)
 		e->ops->elevator_merge_req_fn(q, rq, next);
-	q->nr_sorted--;
 
+	elv_rqhash_reposition(q, rq);
+	elv_rqhash_del(q, next);
+
+	q->nr_sorted--;
 	q->last_merge = rq;
 }
 
@@ -371,8 +483,12 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		BUG_ON(!blk_fs_request(rq));
 		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
-		if (q->last_merge == NULL && rq_mergeable(rq))
-			q->last_merge = rq;
+		if (rq_mergeable(rq)) {
+			elv_rqhash_add(q, rq);
+			if (!q->last_merge)
+				q->last_merge = rq;
+		}
+
 		/*
 		 * Some ioscheds (cfq) run q->request_fn directly, so
 		 * rq cannot be accessed after calling
@@ -557,6 +673,7 @@ struct request *elv_next_request(request_queue_t *q)
 void elv_dequeue_request(request_queue_t *q, struct request *rq)
 {
 	BUG_ON(list_empty(&rq->queuelist));
+	BUG_ON(ELV_ON_HASH(rq));
 
 	list_del_init(&rq->queuelist);
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 9b91bb70c5ed..9cbf7b550c78 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -281,6 +281,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
 {
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->donelist);
+	INIT_HLIST_NODE(&rq->hash);
 
 	rq->errors = 0;
 	rq->rq_status = RQ_ACTIVE;
@@ -2700,6 +2701,7 @@ void __blk_put_request(request_queue_t *q, struct request *req)
 	int priv = req->cmd_flags & REQ_ELVPRIV;
 
 	BUG_ON(!list_empty(&req->queuelist));
+	BUG_ON(!hlist_unhashed(&req->hash));
 
 	blk_free_request(q, req);
 	freed_request(q, rw, priv);