diff options
author | James Bottomley <James.Bottomley@HansenPartnership.com> | 2007-12-31 17:37:00 -0500 |
---|---|---|
committer | James Bottomley <James.Bottomley@HansenPartnership.com> | 2008-01-11 19:29:20 -0500 |
commit | 11c3e689f1c3a73e3af7b0ea767b1b0626da8033 (patch) | |
tree | 0f1d109897b13c9d0f86a3ba5af088596f728cdb /block | |
parent | 166a72878f76e911f2d1ce4b2310d4403e94be11 (diff) |
[SCSI] block: Introduce new blk_queue_update_dma_alignment interface
The purpose of this is to allow stacked alignment settings, with the
ultimate queue alignment being set to the largest alignment requirement
in the stack.
The reason for this is so that the SCSI mid-layer can relax the default
alignment requirements (which are basically causing a lot of superfluous
copying to go on in the SG_IO interface) while allowing transports,
devices or HBAs to add stricter limits if they need them.
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/ll_rw_blk.c | 24 |
1 files changed, 24 insertions, 0 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 8b919940b2ab..14af36c5cdb2 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c | |||
@@ -760,6 +760,30 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask) | |||
760 | EXPORT_SYMBOL(blk_queue_dma_alignment); | 760 | EXPORT_SYMBOL(blk_queue_dma_alignment); |
761 | 761 | ||
762 | /** | 762 | /** |
763 | * blk_queue_update_dma_alignment - update dma length and memory alignment | ||
764 | * @q: the request queue for the device | ||
765 | * @mask: alignment mask | ||
766 | * | ||
767 | * description: | ||
768 | * update required memory and length alignment for direct dma transactions. | ||
769 | * If the requested alignment is larger than the current alignment, then | ||
770 | * the current queue alignment is updated to the new value, otherwise it | ||
771 | * is left alone. The design of this is to allow multiple objects | ||
772 | * (driver, device, transport etc) to set their respective | ||
773 | * alignments without having them interfere. | ||
774 | * | ||
775 | **/ | ||
776 | void blk_queue_update_dma_alignment(struct request_queue *q, int mask) | ||
777 | { | ||
778 | BUG_ON(mask > PAGE_SIZE); | ||
779 | |||
780 | if (mask > q->dma_alignment) | ||
781 | q->dma_alignment = mask; | ||
782 | } | ||
783 | |||
784 | EXPORT_SYMBOL(blk_queue_update_dma_alignment); | ||
785 | |||
786 | /** | ||
763 | * blk_queue_find_tag - find a request by its tag and queue | 787 | * blk_queue_find_tag - find a request by its tag and queue |
764 | * @q: The request queue for the device | 788 | * @q: The request queue for the device |
765 | * @tag: The tag of the request | 789 | * @tag: The tag of the request |