path: root/block/elevator.c
author		Jens Axboe <axboe@suse.de>	2006-07-28 03:23:08 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2006-09-30 14:26:56 -0400
commit		9817064b68fef7e4580c6df1ea597e106b9ff88b (patch)
tree		76c27990626247613e9efa45b792d51ad79635d7	/block/elevator.c
parent		4aff5e2333c9a1609662f2091f55c3f6fffdad36 (diff)
[PATCH] elevator: move the backmerging logic into the elevator core
Right now, every IO scheduler implements its own backmerging (except for noop, which does no merging). That results in duplicated code for essentially the same operation, which is never a good thing. This patch moves the backmerging out of the io schedulers and into the elevator core. We save 1.6kb of text and as a bonus get backmerging for noop as well. Win-win!

Signed-off-by: Jens Axboe <axboe@suse.de>
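For context, the core of the change is a per-elevator hash of pending requests keyed on the sector each request ends at, so a backmerge candidate for an incoming bio can be found with a single lookup on the bio's start sector. Below is a minimal, self-contained userspace sketch of that idea; the struct fields and helpers are simplified stand-ins for illustration (a plain singly linked chain and a mask instead of the kernel's hlist and hash_long()), not the kernel's own types.

#include <stdio.h>

#define ELV_HASH_SHIFT   6
#define ELV_HASH_ENTRIES (1 << ELV_HASH_SHIFT)

struct request {
	unsigned long long sector;      /* start sector */
	unsigned long long nr_sectors;  /* length in sectors */
	struct request *hash_next;      /* simplified stand-in for the hlist chain */
};

/* A request is hashed on the sector it ends at, like rq_hash_key() in the patch. */
static unsigned long long rq_hash_key(const struct request *rq)
{
	return rq->sector + rq->nr_sectors;
}

/* Bucket by 8-sector blocks, mirroring ELV_HASH_BLOCK()/ELV_HASH_FN() (mask instead of hash_long()). */
static unsigned int elv_hash_fn(unsigned long long sector)
{
	return (unsigned int)((sector >> 3) & (ELV_HASH_ENTRIES - 1));
}

static struct request *hash[ELV_HASH_ENTRIES];

/* Add a mergeable request to the hash, chained at the head of its bucket. */
static void rqhash_add(struct request *rq)
{
	unsigned int idx = elv_hash_fn(rq_hash_key(rq));

	rq->hash_next = hash[idx];
	hash[idx] = rq;
}

/*
 * Backmerge lookup: a bio starting at 'offset' can only be appended to a
 * request whose end sector equals 'offset', so one hash probe finds the
 * candidate (or nothing).
 */
static struct request *rqhash_find(unsigned long long offset)
{
	struct request *rq;

	for (rq = hash[elv_hash_fn(offset)]; rq; rq = rq->hash_next)
		if (rq_hash_key(rq) == offset)
			return rq;

	return NULL;
}

int main(void)
{
	struct request rq = { .sector = 100, .nr_sectors = 8, .hash_next = NULL };

	rqhash_add(&rq);

	/* A bio starting at sector 108 sits right behind rq: backmerge candidate. */
	printf("bio at 108: %s\n", rqhash_find(108) ? "backmerge candidate" : "no merge");
	/* A bio starting at sector 200 is not adjacent to anything. */
	printf("bio at 200: %s\n", rqhash_find(200) ? "backmerge candidate" : "no merge");
	return 0;
}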
Diffstat (limited to 'block/elevator.c')
-rw-r--r--	block/elevator.c	147
1 file changed, 132 insertions(+), 15 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 4ac97b642042..cff1102dac9d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -33,6 +33,7 @@
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/blktrace_api.h>
+#include <linux/hash.h>
 
 #include <asm/uaccess.h>
 
@@ -40,6 +41,16 @@ static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
 
 /*
+ * Merge hash stuff.
+ */
+static const int elv_hash_shift = 6;
+#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
+#define ELV_HASH_FN(sec)	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
+#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
+#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
+#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
+
+/*
  * can we safely merge with this request?
  */
 inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -153,25 +164,41 @@ static struct kobj_type elv_ktype;
 
 static elevator_t *elevator_alloc(struct elevator_type *e)
 {
-	elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
-	if (eq) {
-		memset(eq, 0, sizeof(*eq));
-		eq->ops = &e->ops;
-		eq->elevator_type = e;
-		kobject_init(&eq->kobj);
-		snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
-		eq->kobj.ktype = &elv_ktype;
-		mutex_init(&eq->sysfs_lock);
-	} else {
-		elevator_put(e);
-	}
+	elevator_t *eq;
+	int i;
+
+	eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+	if (unlikely(!eq))
+		goto err;
+
+	memset(eq, 0, sizeof(*eq));
+	eq->ops = &e->ops;
+	eq->elevator_type = e;
+	kobject_init(&eq->kobj);
+	snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
+	eq->kobj.ktype = &elv_ktype;
+	mutex_init(&eq->sysfs_lock);
+
+	eq->hash = kmalloc(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, GFP_KERNEL);
+	if (!eq->hash)
+		goto err;
+
+	for (i = 0; i < ELV_HASH_ENTRIES; i++)
+		INIT_HLIST_HEAD(&eq->hash[i]);
+
 	return eq;
+err:
+	kfree(eq);
+	elevator_put(e);
+	return NULL;
 }
 
 static void elevator_release(struct kobject *kobj)
 {
 	elevator_t *e = container_of(kobj, elevator_t, kobj);
+
 	elevator_put(e->elevator_type);
+	kfree(e->hash);
 	kfree(e);
 }
 
@@ -223,6 +250,53 @@ void elevator_exit(elevator_t *e)
 	kobject_put(&e->kobj);
 }
 
+static inline void __elv_rqhash_del(struct request *rq)
+{
+	hlist_del_init(&rq->hash);
+}
+
+static void elv_rqhash_del(request_queue_t *q, struct request *rq)
+{
+	if (ELV_ON_HASH(rq))
+		__elv_rqhash_del(rq);
+}
+
+static void elv_rqhash_add(request_queue_t *q, struct request *rq)
+{
+	elevator_t *e = q->elevator;
+
+	BUG_ON(ELV_ON_HASH(rq));
+	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
+}
+
+static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
+{
+	__elv_rqhash_del(rq);
+	elv_rqhash_add(q, rq);
+}
+
+static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
+{
+	elevator_t *e = q->elevator;
+	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
+	struct hlist_node *entry, *next;
+	struct request *rq;
+
+	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+		BUG_ON(!ELV_ON_HASH(rq));
+
+		if (unlikely(!rq_mergeable(rq))) {
+			__elv_rqhash_del(rq);
+			continue;
+		}
+
+		if (rq_hash_key(rq) == offset)
+			return rq;
+	}
+
+	return NULL;
+}
+
 /*
  * Insert rq into dispatch queue of q.  Queue lock must be held on
  * entry.  If sort != 0, rq is sort-inserted; otherwise, rq will be
@@ -235,6 +309,9 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
+
+	elv_rqhash_del(q, rq);
+
 	q->nr_sorted--;
 
 	boundary = q->end_sector;
@@ -258,11 +335,32 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 	list_add(&rq->queuelist, entry);
 }
 
+/*
+ * This should be in elevator.h, but that requires pulling in rq and q
+ */
+void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
+{
+	if (q->last_merge == rq)
+		q->last_merge = NULL;
+
+	elv_rqhash_del(q, rq);
+
+	q->nr_sorted--;
+
+	q->end_sector = rq_end_sector(rq);
+	q->boundary_rq = rq;
+	list_add_tail(&rq->queuelist, &q->queue_head);
+}
+
 int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
 	elevator_t *e = q->elevator;
+	struct request *__rq;
 	int ret;
 
+	/*
+	 * First try one-hit cache.
+	 */
 	if (q->last_merge) {
 		ret = elv_try_merge(q->last_merge, bio);
 		if (ret != ELEVATOR_NO_MERGE) {
@@ -271,6 +369,15 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
 		}
 	}
 
+	/*
+	 * See if our hash lookup can find a potential backmerge.
+	 */
+	__rq = elv_rqhash_find(q, bio->bi_sector);
+	if (__rq && elv_rq_merge_ok(__rq, bio)) {
+		*req = __rq;
+		return ELEVATOR_BACK_MERGE;
+	}
+
 	if (e->ops->elevator_merge_fn)
 		return e->ops->elevator_merge_fn(q, req, bio);
 
@@ -284,6 +391,8 @@ void elv_merged_request(request_queue_t *q, struct request *rq)
 	if (e->ops->elevator_merged_fn)
 		e->ops->elevator_merged_fn(q, rq);
 
+	elv_rqhash_reposition(q, rq);
+
 	q->last_merge = rq;
 }
 
@@ -294,8 +403,11 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 
 	if (e->ops->elevator_merge_req_fn)
 		e->ops->elevator_merge_req_fn(q, rq, next);
-	q->nr_sorted--;
 
+	elv_rqhash_reposition(q, rq);
+	elv_rqhash_del(q, next);
+
+	q->nr_sorted--;
 	q->last_merge = rq;
 }
 
@@ -371,8 +483,12 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		BUG_ON(!blk_fs_request(rq));
 		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
-		if (q->last_merge == NULL && rq_mergeable(rq))
-			q->last_merge = rq;
+		if (rq_mergeable(rq)) {
+			elv_rqhash_add(q, rq);
+			if (!q->last_merge)
+				q->last_merge = rq;
+		}
+
 		/*
 		 * Some ioscheds (cfq) run q->request_fn directly, so
 		 * rq cannot be accessed after calling
@@ -557,6 +673,7 @@ struct request *elv_next_request(request_queue_t *q)
 void elv_dequeue_request(request_queue_t *q, struct request *rq)
 {
 	BUG_ON(list_empty(&rq->queuelist));
+	BUG_ON(ELV_ON_HASH(rq));
 
 	list_del_init(&rq->queuelist);
 