author		Jens Axboe <axboe@suse.de>		2006-07-28 03:23:08 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2006-09-30 14:26:56 -0400
commit		9817064b68fef7e4580c6df1ea597e106b9ff88b (patch)
tree		76c27990626247613e9efa45b792d51ad79635d7 /block/deadline-iosched.c
parent		4aff5e2333c9a1609662f2091f55c3f6fffdad36 (diff)
[PATCH] elevator: move the backmerging logic into the elevator core
Right now, every IO scheduler implements its own backmerging (except for
noop, which does no merging). That results in duplicated code for
essentially the same operation, which is never a good thing. This patch
moves the backmerging out of the io schedulers and into the elevator
core. We save 1.6kb of text and as a bonus get backmerging for noop as
well. Win-win!
Signed-off-by: Jens Axboe <axboe@suse.de>
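
A back merge hinges on one invariant, visible in the code this patch deletes: a bio can be appended to a request only when the request ends exactly where the bio begins, so the natural lookup key is the request's end sector. A minimal standalone sketch of that test (simplified names, not the kernel code itself):

	#include <stdbool.h>

	typedef unsigned long long sector_t;

	struct req {
		sector_t sector;	/* first sector of the request */
		sector_t nr_sectors;	/* length in sectors */
	};

	/*
	 * A bio starting at bio_sector can be appended to rq (a back
	 * merge) only if rq ends exactly where the bio begins.
	 */
	static bool back_merge_candidate(const struct req *rq, sector_t bio_sector)
	{
		return rq->sector + rq->nr_sectors == bio_sector;
	}

Keeping every pending request in a hash keyed on that end sector makes the candidate lookup O(1); that is the structure each scheduler used to maintain privately and that the elevator core now maintains once.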
Diffstat (limited to 'block/deadline-iosched.c')

 block/deadline-iosched.c | 128 +--
 1 file changed, 2 insertions(+), 126 deletions(-)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c7ca9f0b6498..b66e820f544d 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -12,7 +12,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
-#include <linux/hash.h>
 #include <linux/rbtree.h>
 
 /*
@@ -24,13 +23,6 @@ static const int writes_starved = 2; /* max times reads can starve a write */
 static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */
 
-static const int deadline_hash_shift = 5;
-#define DL_HASH_BLOCK(sec)	((sec) >> 3)
-#define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
-#define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
-#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define ON_HASH(drq)		(!hlist_unhashed(&(drq)->hash))
-
 struct deadline_data {
 	/*
	 * run time data
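
For reference, the macros deleted above keyed the hash on a request's end sector, coarsened to 8-sector blocks, and spread it across 32 buckets (1 << deadline_hash_shift) with hash_long(). A self-contained sketch of that bucket computation, using a stand-in for the kernel's 32-bit hash_long() (the multiplier is the kernel's golden-ratio prime; the example request is hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	typedef unsigned long long sector_t;

	#define HASH_SHIFT 5	/* 1 << 5 = 32 buckets, as in the deleted code */

	/* simplified stand-in for the kernel's 32-bit hash_long() */
	static unsigned int hash32(unsigned long val, unsigned int bits)
	{
		uint32_t h = (uint32_t)val * 0x9e370001u;	/* golden-ratio prime */
		return h >> (32 - bits);
	}

	int main(void)
	{
		sector_t sector = 96, nr_sectors = 8;	/* request spans sectors 96..103 */
		sector_t key = sector + nr_sectors;	/* rq_hash_key: end sector, 104 */
		unsigned long block = key >> 3;		/* DL_HASH_BLOCK: 8-sector granularity */
		unsigned int bucket = hash32(block, HASH_SHIFT);

		printf("key=%llu block=%lu bucket=%u\n", key, block, bucket);
		return 0;
	}

Exact-match comparison still happens on the full key; the >> 3 coarsening only chooses the bucket, so requests ending in the same 8-sector block chain together.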
@@ -46,7 +38,6 @@ struct deadline_data {
	 * next in sort order. read, write or both are NULL
	 */
 	struct deadline_rq *next_drq[2];
-	struct hlist_head *hash;	/* request hash */
 	unsigned int batching;		/* number of sequential requests made */
 	sector_t last_sector;		/* head position */
 	unsigned int starved;		/* times reads have starved writes */
@@ -75,11 +66,6 @@ struct deadline_rq {
 	struct request *request;
 
 	/*
-	 * request hash, key is the ending offset (for back merge lookup)
-	 */
-	struct hlist_node hash;
-
-	/*
	 * expire fifo
	 */
 	struct list_head fifo;
@@ -93,69 +79,6 @@ static kmem_cache_t *drq_pool;
 #define RQ_DATA(rq)	((struct deadline_rq *) (rq)->elevator_private)
 
 /*
- * the back merge hash support functions
- */
-static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
-{
-	hlist_del_init(&drq->hash);
-}
-
-static inline void deadline_del_drq_hash(struct deadline_rq *drq)
-{
-	if (ON_HASH(drq))
-		__deadline_del_drq_hash(drq);
-}
-
-static inline void
-deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
-	struct request *rq = drq->request;
-
-	BUG_ON(ON_HASH(drq));
-
-	hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
-}
-
-/*
- * move hot entry to front of chain
- */
-static inline void
-deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
-	struct request *rq = drq->request;
-	struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
-
-	if (ON_HASH(drq) && &drq->hash != head->first) {
-		hlist_del(&drq->hash);
-		hlist_add_head(&drq->hash, head);
-	}
-}
-
-static struct request *
-deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
-{
-	struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
-	struct hlist_node *entry, *next;
-	struct deadline_rq *drq;
-
-	hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
-		struct request *__rq = drq->request;
-
-		BUG_ON(!ON_HASH(drq));
-
-		if (!rq_mergeable(__rq)) {
-			__deadline_del_drq_hash(drq);
-			continue;
-		}
-
-		if (rq_hash_key(__rq) == offset)
-			return __rq;
-	}
-
-	return NULL;
-}
-
-/*
  * rb tree support functions
  */
 #define rb_entry_drq(node)	rb_entry((node), struct deadline_rq, rb_node)
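
None of the helpers deleted above disappear conceptually; this patch's counterpart changes in block/elevator.c (not shown in this diffstat-limited view) add the same machinery once, keyed on struct request directly rather than on a per-scheduler wrapper. A sketch of what the core-side lookup looks like under that design; the ELV_* names, the hash array in elevator_t, and the hash node in struct request are assumptions about the elevator.c half of the patch:

	static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
	{
		elevator_t *e = q->elevator;
		struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
		struct hlist_node *entry, *next;
		struct request *rq;

		hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
			BUG_ON(!ELV_ON_HASH(rq));

			/*
			 * prune entries that can no longer merge, lazily,
			 * exactly as the deleted deadline version did
			 */
			if (!rq_mergeable(rq)) {
				__elv_rqhash_del(rq);
				continue;
			}

			/* exact match on the end-sector key: a back merge fits */
			if (rq_hash_key(rq) == offset)
				return rq;
		}

		return NULL;
	}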
@@ -267,22 +190,19 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct deadline_rq *drq = RQ_DATA(rq);
-
 	const int data_dir = rq_data_dir(drq->request);
 
 	deadline_add_drq_rb(dd, drq);
+
 	/*
	 * set expire time (only used for reads) and add to fifo list
	 */
 	drq->expires = jiffies + dd->fifo_expire[data_dir];
 	list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
-
-	if (rq_mergeable(rq))
-		deadline_add_drq_hash(dd, drq);
 }
 
 /*
- * remove rq from rbtree, fifo, and hash
+ * remove rq from rbtree and fifo.
  */
 static void deadline_remove_request(request_queue_t *q, struct request *rq)
 {
@@ -291,7 +211,6 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
 
 	list_del_init(&drq->fifo);
 	deadline_del_drq_rb(dd, drq);
-	deadline_del_drq_hash(drq);
 }
 
 static int
@@ -302,19 +221,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	int ret;
 
 	/*
-	 * see if the merge hash can satisfy a back merge
-	 */
-	__rq = deadline_find_drq_hash(dd, bio->bi_sector);
-	if (__rq) {
-		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
-		if (elv_rq_merge_ok(__rq, bio)) {
-			ret = ELEVATOR_BACK_MERGE;
-			goto out;
-		}
-	}
-
-	/*
	 * check for front merge
	 */
 	if (dd->front_merges) {
@@ -333,8 +239,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
 	return ELEVATOR_NO_MERGE;
 out:
-	if (ret)
-		deadline_hot_drq_hash(dd, RQ_DATA(__rq));
 	*req = __rq;
 	return ret;
 }
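
With the hash probe gone, deadline_merge() is left with only the front-merge rbtree check; that works because the core now tries the generic back merge before calling into the scheduler at all. A sketch of that dispatch in elv_merge(), under the same assumed core-side names as above (the real elevator.c change is outside this view):

	int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
	{
		elevator_t *e = q->elevator;
		struct request *__rq;

		/* generic back merge first: any request ending at bio->bi_sector */
		__rq = elv_rqhash_find(q, bio->bi_sector);
		if (__rq && elv_rq_merge_ok(__rq, bio)) {
			*req = __rq;
			return ELEVATOR_BACK_MERGE;
		}

		/* then let the scheduler look for anything else, e.g. front merges */
		if (e->ops->elevator_merge_fn)
			return e->ops->elevator_merge_fn(q, req, bio);

		return ELEVATOR_NO_MERGE;
	}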
@@ -345,12 +249,6 @@ static void deadline_merged_request(request_queue_t *q, struct request *req)
 	struct deadline_rq *drq = RQ_DATA(req);
 
 	/*
-	 * hash always needs to be repositioned, key is end sector
-	 */
-	deadline_del_drq_hash(drq);
-	deadline_add_drq_hash(dd, drq);
-
-	/*
	 * if the merge was a front merge, we need to reposition request
	 */
 	if (rq_rb_key(req) != drq->rb_key) {
@@ -370,13 +268,6 @@ deadline_merged_requests(request_queue_t *q, struct request *req,
 	BUG_ON(!drq);
 	BUG_ON(!dnext);
 
-	/*
-	 * reposition drq (this is the merged request) in hash, and in rbtree
-	 * in case of a front merge
-	 */
-	deadline_del_drq_hash(drq);
-	deadline_add_drq_hash(dd, drq);
-
 	if (rq_rb_key(req) != drq->rb_key) {
 		deadline_del_drq_rb(dd, drq);
 		deadline_add_drq_rb(dd, drq);
@@ -594,7 +485,6 @@ static void deadline_exit_queue(elevator_t *e)
 	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
 
 	mempool_destroy(dd->drq_pool);
-	kfree(dd->hash);
 	kfree(dd);
 }
 
@@ -605,7 +495,6 @@ static void deadline_exit_queue(elevator_t *e)
 static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct deadline_data *dd;
-	int i;
 
 	if (!drq_pool)
 		return NULL;
@@ -615,24 +504,13 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 		return NULL;
 	memset(dd, 0, sizeof(*dd));
 
-	dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
-				GFP_KERNEL, q->node);
-	if (!dd->hash) {
-		kfree(dd);
-		return NULL;
-	}
-
 	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					mempool_free_slab, drq_pool, q->node);
 	if (!dd->drq_pool) {
-		kfree(dd->hash);
 		kfree(dd);
 		return NULL;
 	}
 
-	for (i = 0; i < DL_HASH_ENTRIES; i++)
-		INIT_HLIST_HEAD(&dd->hash[i]);
-
 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
 	dd->sort_list[READ] = RB_ROOT;
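
The per-queue allocation, error unwinding, and bucket initialization removed above have a single counterpart in the core: the elevator's own setup allocates and initializes one hash per elevator queue, and frees it with the queue. A fragment of what that setup plausibly looks like (elevator_alloc(), eq->hash, and ELV_HASH_ENTRIES are assumed names for the core-side half of the patch):

	/*
	 * in the core's elevator allocation path: one hash per elevator
	 * queue, set up once, so schedulers no longer manage their own
	 */
	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);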
@@ -667,8 +545,6 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	RB_CLEAR_NODE(&drq->rb_node);
 	drq->request = rq;
 
-	INIT_HLIST_NODE(&drq->hash);
-
 	INIT_LIST_HEAD(&drq->fifo);
 
 	rq->elevator_private = drq;