| author | Tejun Heo <htejun@gmail.com> | 2005-10-28 02:29:39 -0400 |
|---|---|---|
| committer | Jens Axboe <axboe@nelson.home.kernel.dk> | 2005-10-28 02:48:12 -0400 |
| commit | cb98fc8bb9c141009e2bda99c0db39d387e142cf | |
| tree | 8957f8a79f39c3e6633a0dbb165ced8b530aca0c /include/linux | |
| parent | cb19833dccb32f97cacbfff834b53523915f13f6 | |
[BLOCK] Reimplement elevator switch
This patch reimplements the elevator switch. It assumes the generic
dispatch queue patchset has been applied.
* Each request is tagged with the REQ_ELVPRIV flag if it has its
elevator private data set.
* Requests which don't have REQ_ELVPRIV set never enter the iosched;
they are always inserted directly into the dispatch queue.
Accordingly, elevator_put_req_fn is called only for requests which
have REQ_ELVPRIV set.
* The request queue maintains the current number of requests which
have elevator data set (elevator_set_req_fn called) in q->rq.elvpriv.
* If a request queue has QUEUE_FLAG_BYPASS set, elevator private data
is not allocated for new requests (see the sketch after this list).
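
The allocation and completion rules above can be modeled in a few
lines of plain C. The sketch below is a self-contained user-space
illustration, not the actual ll_rw_blk.c code: the stripped-down
struct definitions, the plain `bypass` int standing in for
QUEUE_FLAG_BYPASS, and the bodies of get_request()/freed_request()
are simplifying assumptions; only the REQ_ELVPRIV flag and the
rq.elvpriv counter mirror the patch.

```c
#include <assert.h>
#include <stdio.h>

#define REQ_ELVPRIV	(1 << 0)	/* elevator private data attached */

struct request {
	unsigned long flags;
};

struct request_list {
	int elvpriv;			/* requests with elevator data attached */
};

struct request_queue {
	struct request_list rq;
	int bypass;			/* stands in for QUEUE_FLAG_BYPASS */
};

/* Allocation path: attach elevator data only when not bypassing. */
static void get_request(struct request_queue *q, struct request *req)
{
	req->flags = 0;
	if (!q->bypass) {
		/* elevator_set_req_fn would be called here */
		req->flags |= REQ_ELVPRIV;
		q->rq.elvpriv++;
	}
}

/* Completion path: only REQ_ELVPRIV requests touch the elevator. */
static void freed_request(struct request_queue *q, struct request *req)
{
	if (req->flags & REQ_ELVPRIV) {
		/* elevator_put_req_fn would be called here */
		q->rq.elvpriv--;
	}
}

int main(void)
{
	struct request_queue q = { .rq = { .elvpriv = 0 }, .bypass = 0 };
	struct request a, b;

	get_request(&q, &a);		/* normal: enters the iosched */
	q.bypass = 1;			/* elevator switch in progress */
	get_request(&q, &b);		/* bypassing: straight to dispatch */
	assert(q.rq.elvpriv == 1);	/* only 'a' holds elevator data */

	freed_request(&q, &a);
	freed_request(&q, &b);
	assert(q.rq.elvpriv == 0);	/* safe to attach the new iosched */
	printf("elvpriv drained to %d\n", q.rq.elvpriv);
	return 0;
}
```

Because only REQ_ELVPRIV requests are counted, the switch only has to
wait for elvpriv to reach zero rather than draining every request on
the queue, which is what the removed QUEUE_FLAG_DRAIN machinery did.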
To switch to another iosched, we set QUEUE_FLAG_BYPASS and wait until
elvpriv drops to zero; then we attach the new iosched and clear
QUEUE_FLAG_BYPASS. The new implementation is much simpler and the main
code paths are less cluttered, IMHO.
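
As a rough sketch of that sequence, again over the simplified model
rather than the real elevator.c code (the actual switch path also
quiesces the queue and sleeps instead of busy-waiting, and
elevator_switch_sketch() is a hypothetical name, not a kernel
function):

```c
/* Types repeated from the model above so this snippet stands alone. */
struct request_list { int elvpriv; };
struct request_queue { struct request_list rq; int bypass; };

static void elevator_switch_sketch(struct request_queue *q)
{
	q->bypass = 1;		/* set QUEUE_FLAG_BYPASS: new requests
				 * skip elevator_set_req_fn from now on */

	while (q->rq.elvpriv > 0)
		;		/* the kernel sleeps here; a busy-wait
				 * keeps the sketch short */

	/* old iosched holds no requests now: detach it, attach the new one */

	q->bypass = 0;		/* clear QUEUE_FLAG_BYPASS */
}
```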
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/blkdev.h | 10
1 file changed, 4 insertions, 6 deletions
```diff
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 159dbcd2eb59..6186d5e2110f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -107,9 +107,9 @@ typedef void (rq_end_io_fn)(struct request *);
 struct request_list {
 	int count[2];
 	int starved[2];
+	int elvpriv;
 	mempool_t *rq_pool;
 	wait_queue_head_t wait[2];
-	wait_queue_head_t drain;
 };
 
 #define BLK_MAX_CDB	16
@@ -211,6 +211,7 @@ enum rq_flag_bits {
 	__REQ_STARTED,		/* drive already may have started this one */
 	__REQ_DONTPREP,		/* don't call prep for this one */
 	__REQ_QUEUED,		/* uses queueing */
+	__REQ_ELVPRIV,		/* elevator private data attached */
 	/*
 	 * for ATA/ATAPI devices
 	 */
@@ -244,6 +245,7 @@ enum rq_flag_bits {
 #define REQ_STARTED	(1 << __REQ_STARTED)
 #define REQ_DONTPREP	(1 << __REQ_DONTPREP)
 #define REQ_QUEUED	(1 << __REQ_QUEUED)
+#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
 #define REQ_PC		(1 << __REQ_PC)
 #define REQ_BLOCK_PC	(1 << __REQ_BLOCK_PC)
 #define REQ_SENSE	(1 << __REQ_SENSE)
@@ -413,8 +415,6 @@ struct request_queue
 	unsigned int		sg_reserved_size;
 	int			node;
 
-	struct list_head	drain_list;
-
 	/*
 	 * reserved for flush operations
 	 */
@@ -442,7 +442,7 @@ enum {
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
-#define QUEUE_FLAG_DRAIN	8	/* draining queue for sched switch */
+#define QUEUE_FLAG_BYPASS	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_FLUSH	9	/* doing barrier flush sequence */
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
@@ -668,8 +668,6 @@ extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(request_queue_t *);
 extern void __generic_unplug_device(request_queue_t *);
 extern long nr_blockdev_pages(void);
-extern void blk_wait_queue_drained(request_queue_t *, int);
-extern void blk_finish_queue_drain(request_queue_t *);
 
 int blk_get_queue(request_queue_t *);
 request_queue_t *blk_alloc_queue(int gfp_mask);
```