about summary refs log tree commit diff stats
path: root/include/linux/blkdev.h
diff options
context:
space:
mode:
authorJens Axboe <axboe@suse.de>2006-01-09 10:02:34 -0500
committerJens Axboe <axboe@suse.de>2006-01-09 10:02:34 -0500
commitff856bad67cb65cb4dc4ef88b808804fc4265782 (patch)
tree2db1e0be2be1e814cf8fe9bb8d3d7401fb24dd86 /include/linux/blkdev.h
parent5367f2d67c7d0bf1faae90e6e7b4e2ac3c9b5e0f (diff)
[BLOCK] ll_rw_blk: Enable out-of-order request completions through softirq
Request completion can be a quite heavy process, since it needs to iterate through the entire request and complete the bio's it holds. This patch adds blk_complete_request() which moves this processing into a dedicated block softirq. Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--include/linux/blkdev.h21
1 file changed, 18 insertions, 3 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fb0985377421..804cc4ec9533 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -118,9 +118,9 @@ struct request_list {
118 * try to put the fields that are referenced together in the same cacheline 118 * try to put the fields that are referenced together in the same cacheline
119 */ 119 */
120struct request { 120struct request {
121 struct list_head queuelist; /* looking for ->queue? you must _not_ 121 struct list_head queuelist;
122 * access it directly, use 122 struct list_head donelist;
123 * blkdev_dequeue_request! */ 123
124 unsigned long flags; /* see REQ_ bits below */ 124 unsigned long flags; /* see REQ_ bits below */
125 125
126 /* Maintain bio traversal state for part by part I/O submission. 126 /* Maintain bio traversal state for part by part I/O submission.
@@ -141,6 +141,7 @@ struct request {
141 struct bio *biotail; 141 struct bio *biotail;
142 142
143 void *elevator_private; 143 void *elevator_private;
144 void *completion_data;
144 145
145 unsigned short ioprio; 146 unsigned short ioprio;
146 147
@@ -291,6 +292,7 @@ typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
291typedef void (activity_fn) (void *data, int rw); 292typedef void (activity_fn) (void *data, int rw);
292typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *); 293typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
293typedef void (prepare_flush_fn) (request_queue_t *, struct request *); 294typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
295typedef void (softirq_done_fn)(struct request *);
294 296
295enum blk_queue_state { 297enum blk_queue_state {
296 Queue_down, 298 Queue_down,
@@ -332,6 +334,7 @@ struct request_queue
332 activity_fn *activity_fn; 334 activity_fn *activity_fn;
333 issue_flush_fn *issue_flush_fn; 335 issue_flush_fn *issue_flush_fn;
334 prepare_flush_fn *prepare_flush_fn; 336 prepare_flush_fn *prepare_flush_fn;
337 softirq_done_fn *softirq_done_fn;
335 338
336 /* 339 /*
337 * Dispatch queue sorting 340 * Dispatch queue sorting
@@ -646,6 +649,17 @@ extern int end_that_request_first(struct request *, int, int);
646extern int end_that_request_chunk(struct request *, int, int); 649extern int end_that_request_chunk(struct request *, int, int);
647extern void end_that_request_last(struct request *, int); 650extern void end_that_request_last(struct request *, int);
648extern void end_request(struct request *req, int uptodate); 651extern void end_request(struct request *req, int uptodate);
652extern void blk_complete_request(struct request *);
653
654static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
655{
656 if (blk_fs_request(rq))
657 return (nr_bytes >= (rq->hard_nr_sectors << 9));
658 else if (blk_pc_request(rq))
659 return nr_bytes >= rq->data_len;
660
661 return 0;
662}
649 663
650/* 664/*
651 * end_that_request_first/chunk() takes an uptodate argument. we account 665 * end_that_request_first/chunk() takes an uptodate argument. we account
@@ -694,6 +708,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
694extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn); 708extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
695extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *); 709extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
696extern void blk_queue_dma_alignment(request_queue_t *, int); 710extern void blk_queue_dma_alignment(request_queue_t *, int);
711extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
697extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 712extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
698extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *); 713extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
699extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *); 714extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);