author    James Bottomley <James.Bottomley@HansenPartnership.com>    2007-12-31 17:37:00 -0500
committer James Bottomley <James.Bottomley@HansenPartnership.com>    2008-01-11 19:29:20 -0500
commit    11c3e689f1c3a73e3af7b0ea767b1b0626da8033
tree      0f1d109897b13c9d0f86a3ba5af088596f728cdb
parent    166a72878f76e911f2d1ce4b2310d4403e94be11
[SCSI] block: Introduce new blk_queue_update_dma_alignment interface
The purpose of this is to allow stacked alignment settings, with the
ultimate queue alignment set to the largest alignment requirement
in the stack.
The reason for this is to let the SCSI mid-layer relax the default
alignment requirements (which currently cause a lot of superfluous
copying in the SG_IO interface) while still allowing transports,
devices or HBAs to add stricter limits if they need them.
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
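
To illustrate the stacking behaviour described above, here is a small
userspace sketch; it is not part of the patch, and the fake_queue struct
and the caller roles are hypothetical stand-ins for the request queue and
the mid-layer, transport and HBA code that would call
blk_queue_update_dma_alignment() in the kernel. Each caller proposes a
mask and the largest requirement wins:

#include <assert.h>
#include <stdio.h>

struct fake_queue {
	int dma_alignment;	/* alignment mask, e.g. 0x3 == 4-byte alignment */
};

/* Mirrors the new helper: only tighten the alignment, never relax it. */
static void update_dma_alignment(struct fake_queue *q, int mask)
{
	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}

int main(void)
{
	struct fake_queue q = { .dma_alignment = 0 };	/* relaxed default from the mid-layer */

	update_dma_alignment(&q, 0x3);	/* transport asks for 4-byte alignment */
	update_dma_alignment(&q, 0x1);	/* HBA asks for less: ignored */
	update_dma_alignment(&q, 0x7);	/* device needs 8-byte alignment: wins */

	printf("final dma_alignment mask: 0x%x\n", q.dma_alignment);
	assert(q.dma_alignment == 0x7);
	return 0;
}

Because the helper only ever raises the mask, the order in which the
objects register their requirements does not matter.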
 block/ll_rw_blk.c      | 24 ++++++++++++++++++++++++
 include/linux/blkdev.h |  1 +
 2 files changed, 25 insertions(+), 0 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8b919940b2ab..14af36c5cdb2 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -760,6 +760,30 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
 /**
+ * blk_queue_update_dma_alignment - update dma length and memory alignment
+ * @q:     the request queue for the device
+ * @mask:  alignment mask
+ *
+ * description:
+ *    update required memory and length alignment for direct dma transactions.
+ *    If the requested alignment is larger than the current alignment, then
+ *    the current queue alignment is updated to the new value, otherwise it
+ *    is left alone.  The design of this is to allow multiple objects
+ *    (driver, device, transport etc) to set their respective
+ *    alignments without having them interfere.
+ *
+ **/
+void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
+{
+	BUG_ON(mask > PAGE_SIZE);
+
+	if (mask > q->dma_alignment)
+		q->dma_alignment = mask;
+}
+
+EXPORT_SYMBOL(blk_queue_update_dma_alignment);
+
+/**
  * blk_queue_find_tag - find a request by its tag and queue
  * @q: The request queue for the device
  * @tag: The tag of the request
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d18ee67b40f8..81e99e516302 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -766,6 +766,7 @@ extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);