author	Tejun Heo <htejun@gmail.com>	2008-02-19 05:36:53 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2008-02-19 05:36:53 -0500
commit	2fb98e8414c42cb14698833aac640b143b9ade4f (patch)
tree	ab241305b5ce3ea5ef3e76df7abe04b831cd9e34
parent	6b00769fe1502b4ad97bb327ef7ac971b208bfb5 (diff)
block: implement request_queue->dma_drain_needed
Draining shouldn't be done for commands where overflow may indicate data
integrity issues.  Add the dma_drain_needed callback to request_queue.  The
drain buffer is appended iff this function returns non-zero.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
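As a rough illustration (not part of this patch), a driver passes its
dma_drain_needed callback to blk_queue_dma_drain() together with the drain
buffer; the callback is then consulted per request when the scatterlist is
built.  The callback, buffer size, and setup function below are hypothetical:

#include <linux/blkdev.h>
#include <linux/slab.h>

#define MY_DRAIN_SIZE	256	/* hypothetical drain buffer size */

/* Hypothetical per-request predicate: drain only for non-fs
 * (packet-style) requests, where a device overrun does not indicate a
 * data integrity problem. */
static int my_dma_drain_needed(struct request *rq)
{
	return !blk_fs_request(rq);
}

static int my_setup_queue(struct request_queue *q)
{
	void *buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* Register callback and drain buffer; this fails unless the queue
	 * allows at least two segments, since one is reserved for the
	 * drain. */
	return blk_queue_dma_drain(q, my_dma_drain_needed, buf, MY_DRAIN_SIZE);
}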
-rw-r--r--	block/blk-merge.c	2
-rw-r--r--	block/blk-settings.c	7
-rw-r--r--	include/linux/blkdev.h	7
3 files changed, 11 insertions, 5 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 39f2e077a014..bef1b4d0fc02 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -220,7 +220,7 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
-	if (q->dma_drain_size) {
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
 		sg->page_link &= ~0x02;
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 13536a388d27..9a8ffdd0ce3d 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -296,6 +296,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  *
  * @q:  the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf:  physically contiguous buffer
  * @size:  size of the buffer in bytes
  *
@@ -315,14 +316,16 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-		unsigned int size)
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size)
 {
 	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
 	--q->max_hw_segments;
 	--q->max_phys_segments;
+	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f1fe9fbf1c0e..6fe67d1939c2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -259,6 +259,7 @@ struct bio_vec;
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
+typedef int (dma_drain_needed_fn)(struct request *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -295,6 +296,7 @@ struct request_queue
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	dma_drain_needed_fn	*dma_drain_needed;
 
 	/*
 	 * Dispatch queue sorting
@@ -699,8 +701,9 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
-extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
-			       unsigned int size);
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);