commit    9817064b68fef7e4580c6df1ea597e106b9ff88b
tree      76c27990626247613e9efa45b792d51ad79635d7  /block/cfq-iosched.c
parent    4aff5e2333c9a1609662f2091f55c3f6fffdad36
author    Jens Axboe <axboe@suse.de>                   2006-07-28 03:23:08 -0400
committer Jens Axboe <axboe@nelson.home.kernel.dk>     2006-09-30 14:26:56 -0400
[PATCH] elevator: move the backmerging logic into the elevator core
Right now, every IO scheduler implements its own backmerging (except for
noop, which does no merging). That results in duplicated code for
essentially the same operation, which is never a good thing. This patch
moves the backmerging out of the io schedulers and into the elevator
core. We save 1.6kb of text and as a bonus get backmerging for noop as
well. Win-win!
Signed-off-by: Jens Axboe <axboe@suse.de>
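A back merge asks one question: does some queued request end exactly where the incoming bio begins? The CFQ code deleted below answers it with a hash keyed on each request's end sector, rq->sector + rq->nr_sectors. As a reading aid, here is a minimal sketch of that lookup as it might look once hoisted into the elevator core. It is modeled directly on the removed CFQ code; the ELV_* names, the elv_rqhash_find() signature, and the hash member on struct request are illustrative assumptions, since elevator.c itself is outside this file-limited diff.

/* sketch only; needs <linux/hash.h> for hash_long() and <linux/list.h> for hlists */
#define ELV_HASH_SHIFT          6
#define ELV_HASH_ENTRIES        (1 << ELV_HASH_SHIFT)
#define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
#define ELV_HASH_FN(sec)        hash_long(ELV_HASH_BLOCK(sec), ELV_HASH_SHIFT)
/* a request can take a back merge if it ends where the bio starts */
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)

static struct request *elv_rqhash_find(struct hlist_head *hash, sector_t offset)
{
        struct hlist_head *hash_list = &hash[ELV_HASH_FN(offset)];
        struct hlist_node *entry, *next;

        hlist_for_each_safe(entry, next, hash_list) {
                /* assumes this patch adds a 'struct hlist_node hash' to struct request */
                struct request *__rq = hlist_entry(entry, struct request, hash);

                if (!rq_mergeable(__rq)) {
                        /* prune requests that can no longer merge */
                        hlist_del_init(&__rq->hash);
                        continue;
                }

                if (rq_hash_key(__rq) == offset)
                        return __rq;
        }

        return NULL;
}

Hashing on the end sector is what makes back merges cheap: one bucket walk per bio instead of a search through the scheduler's sorted structures, which is why every scheduler wanted, and duplicated, this code.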
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--   block/cfq-iosched.c   86
1 file changed, 3 insertions(+), 83 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3a3aee08ec5f..1b803c0c90f1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -41,16 +41,6 @@ static DEFINE_SPINLOCK(cfq_exit_lock);
 #define CFQ_QHASH_ENTRIES       (1 << CFQ_QHASH_SHIFT)
 #define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
 
-/*
- * for the hash of crq inside the cfqq
- */
-#define CFQ_MHASH_SHIFT         6
-#define CFQ_MHASH_BLOCK(sec)    ((sec) >> 3)
-#define CFQ_MHASH_ENTRIES       (1 << CFQ_MHASH_SHIFT)
-#define CFQ_MHASH_FN(sec)       hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
-#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)    hlist_entry((ptr), struct cfq_rq, hash)
-
 #define list_entry_cfqq(ptr)    list_entry((ptr), struct cfq_queue, cfq_list)
 #define list_entry_fifo(ptr)    list_entry((ptr), struct request, queuelist)
 
@@ -112,11 +102,6 @@ struct cfq_data {
         */
        struct hlist_head *cfq_hash;
 
-       /*
-        * global crq hash for all queues
-        */
-       struct hlist_head *crq_hash;
-
        mempool_t *crq_pool;
 
        int rq_in_driver;
@@ -203,7 +188,6 @@ struct cfq_rq {
        struct rb_node rb_node;
        sector_t rb_key;
        struct request *request;
-       struct hlist_node hash;
 
        struct cfq_queue *cfq_queue;
        struct cfq_io_context *io_context;
@@ -272,42 +256,6 @@ static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
 
 /*
- * lots of deadline iosched dupes, can be abstracted later...
- */
-static inline void cfq_del_crq_hash(struct cfq_rq *crq)
-{
-       hlist_del_init(&crq->hash);
-}
-
-static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
-{
-       const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
-
-       hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
-}
-
-static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
-{
-       struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
-       struct hlist_node *entry, *next;
-
-       hlist_for_each_safe(entry, next, hash_list) {
-               struct cfq_rq *crq = list_entry_hash(entry);
-               struct request *__rq = crq->request;
-
-               if (!rq_mergeable(__rq)) {
-                       cfq_del_crq_hash(crq);
-                       continue;
-               }
-
-               if (rq_hash_key(__rq) == offset)
-                       return __rq;
-       }
-
-       return NULL;
-}
-
-/*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
@@ -677,7 +625,6 @@ static void cfq_remove_request(struct request *rq)
 
        list_del_init(&rq->queuelist);
        cfq_del_crq_rb(crq);
-       cfq_del_crq_hash(crq);
 }
 
 static int
@@ -685,34 +632,20 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct request *__rq;
-       int ret;
-
-       __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
-       if (__rq && elv_rq_merge_ok(__rq, bio)) {
-               ret = ELEVATOR_BACK_MERGE;
-               goto out;
-       }
 
        __rq = cfq_find_rq_fmerge(cfqd, bio);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
-               ret = ELEVATOR_FRONT_MERGE;
-               goto out;
+               *req = __rq;
+               return ELEVATOR_FRONT_MERGE;
        }
 
        return ELEVATOR_NO_MERGE;
-out:
-       *req = __rq;
-       return ret;
 }
 
 static void cfq_merged_request(request_queue_t *q, struct request *req)
 {
-       struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_rq *crq = RQ_DATA(req);
 
-       cfq_del_crq_hash(crq);
-       cfq_add_crq_hash(cfqd, crq);
-
        if (rq_rb_key(req) != crq->rb_key) {
                struct cfq_queue *cfqq = crq->cfq_queue;
 
@@ -1825,9 +1758,6 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
 
        list_add_tail(&rq->queuelist, &cfqq->fifo);
 
-       if (rq_mergeable(rq))
-               cfq_add_crq_hash(cfqd, crq);
-
        cfq_crq_enqueued(cfqd, cfqq, crq);
 }
 
@@ -2055,7 +1985,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                RB_CLEAR_NODE(&crq->rb_node);
                crq->rb_key = 0;
                crq->request = rq;
-               INIT_HLIST_NODE(&crq->hash);
                crq->cfq_queue = cfqq;
                crq->io_context = cic;
 
@@ -2221,7 +2150,6 @@ static void cfq_exit_queue(elevator_t *e)
        cfq_shutdown_timer_wq(cfqd);
 
        mempool_destroy(cfqd->crq_pool);
-       kfree(cfqd->crq_hash);
        kfree(cfqd->cfq_hash);
        kfree(cfqd);
 }
@@ -2246,20 +2174,14 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
        INIT_LIST_HEAD(&cfqd->empty_list);
        INIT_LIST_HEAD(&cfqd->cic_list);
 
-       cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
-       if (!cfqd->crq_hash)
-               goto out_crqhash;
-
        cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
        if (!cfqd->cfq_hash)
-               goto out_cfqhash;
+               goto out_crqhash;
 
        cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
        if (!cfqd->crq_pool)
                goto out_crqpool;
 
-       for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
-               INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
        for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
 
@@ -2289,8 +2211,6 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
        return cfqd;
 out_crqpool:
        kfree(cfqd->cfq_hash);
-out_cfqhash:
-       kfree(cfqd->crq_hash);
 out_crqhash:
        kfree(cfqd);
        return NULL;
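With its private hash gone, cfq_merge() above is left reporting only front merges; the back-merge probe now happens once in the elevator core, for every scheduler, noop included. A rough sketch of what that shared path plausibly looks like, reusing the hypothetical elv_rqhash_find() above (the function name, its internals, and the e->hash field are assumptions drawn from the commit message, not code in this file-limited diff):

static int elv_try_merge_sketch(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;
        struct request *__rq;

        /*
         * Back merge first: this is the part every scheduler used to
         * open-code, now done once against the core's request hash
         * (e->hash is an assumed struct hlist_head array).
         */
        __rq = elv_rqhash_find(e->hash, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        /* front merges (and anything smarter) stay scheduler-specific */
        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

This split is the point of the patch: the generic, mechanical half of merging lives in one place, while each scheduler keeps only the policy it actually differs on.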