author     Jens Axboe <axboe@suse.de>                2005-10-20 10:37:00 -0400
committer  Jens Axboe <axboe@nelson.home.kernel.dk>  2005-10-28 02:44:37 -0400
commit     1b47f531e244e339b648dfff071c086f936e49e1
tree       4ecbec3d9ade19e01a47c562772db550daadcef2 /drivers/block/elevator.c
parent     8922e16cf6269e668123acb1ae1fdc62b7a3a4fc
[PATCH] generic dispatch fixes
- Split elv_dispatch_insert() into two functions
- Rename rq_last_sector() to rq_end_sector()
Signed-off-by: Jens Axboe <axboe@suse.de>
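
For elevators built on the generic dispatch queue, the split replaces the old three-argument entry point with two explicit ones, as the hunks below show (elv_dispatch_sort() plus the new tail helper). A minimal migration sketch; the call sites and the q/rq variables here are illustrative, not taken from this patch:

```c
/*
 * Hypothetical call sites in an elevator's dispatch path, shown only to
 * illustrate the API split described in the commit message above.
 */

/* Before this patch: behaviour selected by the third argument. */
elv_dispatch_insert(q, rq, 0);		/* append to the dispatch queue tail */
elv_dispatch_insert(q, rq, 1);		/* sort-insert into the dispatch queue */

/* After this patch: one function per behaviour. */
elv_dispatch_insert_tail(q, rq);	/* tail append */
elv_dispatch_sort(q, rq);		/* sort-insert */
```

The EXPORT_SYMBOL change in the last hunk means modular elevators must switch to the new symbol name as well.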
Diffstat (limited to 'drivers/block/elevator.c')
-rw-r--r--  drivers/block/elevator.c  33
1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index a27555908d35..237e43d760c8 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -40,11 +40,6 @@
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
 
-static inline sector_t rq_last_sector(struct request *rq)
-{
-	return rq->sector + rq->nr_sectors;
-}
-
 /*
  * can we safely merge with this request?
  */
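
The helper deleted here is not dropped outright; per the commit message it is renamed to rq_end_sector(), which the later hunks call. Its new definition lies outside this file-limited diff (presumably in a shared header such as include/linux/elevator.h), so the following is only a sketch assuming the body is unchanged apart from the name:

```c
/*
 * Presumed post-rename form of the helper removed above; its actual
 * location and spelling (inline function vs. macro) are not shown in
 * this diff, which is limited to drivers/block/elevator.c.
 */
static inline sector_t rq_end_sector(struct request *rq)
{
	return rq->sector + rq->nr_sectors;	/* first sector past the request */
}
```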
@@ -148,7 +143,7 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
 	INIT_LIST_HEAD(&q->queue_head);
 	q->last_merge = NULL;
 	q->elevator = eq;
-	q->last_sector = 0;
+	q->end_sector = 0;
 	q->boundary_rq = NULL;
 	q->max_back_kb = 0;
 
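
In the next hunk the added elv_dispatch_insert_tail() appears with an empty body in this rendering. Going by the commit message's split and the "if (!sort)" branch removed from elv_dispatch_insert(), its body presumably takes over exactly that branch; a hedged sketch, not the literal content of this commit:

```c
/*
 * Presumed body of the new tail-insert helper, reconstructed from the
 * removed "if (!sort)" branch shown in the following hunk; treat it as
 * a sketch rather than verbatim patch content.
 */
void elv_dispatch_insert_tail(request_queue_t *q, struct request *rq)
{
	/* The request becomes the new scheduling boundary... */
	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;

	/* ...and is simply appended to the dispatch queue. */
	list_add_tail(&rq->queuelist, &q->queue_head);
}
```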
@@ -233,29 +228,25 @@ void elevator_exit(elevator_t *e)
 	kfree(e);
 }
 
+void elv_dispatch_insert_tail(request_queue_t *q, struct request *rq)
+{
+}
+
 /*
  * Insert rq into dispatch queue of q. Queue lock must be held on
  * entry. If sort != 0, rq is sort-inserted; otherwise, rq will be
  * appended to the dispatch queue. To be used by specific elevators.
  */
-void elv_dispatch_insert(request_queue_t *q, struct request *rq, int sort)
+void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 {
 	sector_t boundary;
 	unsigned max_back;
 	struct list_head *entry;
 
-	if (!sort) {
-		/* Specific elevator is performing sort. Step away. */
-		q->last_sector = rq_last_sector(rq);
-		q->boundary_rq = rq;
-		list_add_tail(&rq->queuelist, &q->queue_head);
-		return;
-	}
-
-	boundary = q->last_sector;
+	boundary = q->end_sector;
 	max_back = q->max_back_kb * 2;
 	boundary = boundary > max_back ? boundary - max_back : 0;
 
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
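
The back-seek window computed by elv_dispatch_sort() is expressed in 512-byte sectors: q->end_sector marks the sector just past the current scheduling boundary, and max_back_kb * 2 converts the per-queue limit from KiB to sectors before the boundary is clamped at zero. A standalone arithmetic sketch of that clamp (plain user-space C with made-up values; only the variable names mirror the queue members above):

```c
#include <stdio.h>

int main(void)
{
	/* Illustrative values, not taken from the patch. */
	unsigned long long end_sector = 10000;	/* sector just past the boundary request */
	unsigned int max_back_kb = 512;		/* allowed backward seek, in KiB */

	/* 1 KiB == two 512-byte sectors, as in elv_dispatch_sort(). */
	unsigned int max_back = max_back_kb * 2;

	/* Clamp so the boundary never wraps below sector 0. */
	unsigned long long boundary =
		end_sector > max_back ? end_sector - max_back : 0;

	printf("dispatch boundary = sector %llu (window of %u sectors)\n",
	       boundary, max_back);
	return 0;
}
```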
@@ -343,10 +334,10 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 			where = ELEVATOR_INSERT_BACK;
 
 		/*
-		 * this request is scheduling boundary, update last_sector
+		 * this request is scheduling boundary, update end_sector
 		 */
 		if (blk_fs_request(rq)) {
-			q->last_sector = rq_last_sector(rq);
+			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
 	}
@@ -479,7 +470,7 @@ struct request *elv_next_request(request_queue_t *q)
 			q->last_merge = NULL;
 
 		if (!q->boundary_rq || q->boundary_rq == rq) {
-			q->last_sector = rq_last_sector(rq);
+			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = NULL;
 		}
 
@@ -802,7 +793,7 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 	return len;
 }
 
-EXPORT_SYMBOL(elv_dispatch_insert);
+EXPORT_SYMBOL(elv_dispatch_sort);
 EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
 EXPORT_SYMBOL(elv_requeue_request);