-rw-r--r--   block/ll_rw_blk.c      | 24
-rw-r--r--   include/linux/blkdev.h |  1
2 files changed, 25 insertions, 0 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8b919940b2ab..14af36c5cdb2 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -760,6 +760,30 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
 /**
+ * blk_queue_update_dma_alignment - update dma length and memory alignment
+ * @q:     the request queue for the device
+ * @mask:  alignment mask
+ *
+ * description:
+ *    update required memory and length alignment for direct dma transactions.
+ *    If the requested alignment is larger than the current alignment, then
+ *    the current queue alignment is updated to the new value, otherwise it
+ *    is left alone.  The design of this is to allow multiple objects
+ *    (driver, device, transport etc) to set their respective
+ *    alignments without having them interfere.
+ *
+ **/
+void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
+{
+	BUG_ON(mask > PAGE_SIZE);
+
+	if (mask > q->dma_alignment)
+		q->dma_alignment = mask;
+}
+
+EXPORT_SYMBOL(blk_queue_update_dma_alignment);
+
+/**
  * blk_queue_find_tag - find a request by its tag and queue
  * @q: The request queue for the device
  * @tag: The tag of the request
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d18ee67b40f8..81e99e516302 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -766,6 +766,7 @@ extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
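
The new helper differs from blk_queue_dma_alignment() in that it never lowers the stored mask: blk_queue_dma_alignment() overwrites q->dma_alignment outright, so the last caller wins, while blk_queue_update_dma_alignment() keeps the larger (stricter) of the current and requested masks. Below is a minimal sketch of how two independent layers could use it; the example_* functions are hypothetical illustrations, only the blk_queue_update_dma_alignment() calls come from this patch, and the mask convention ("alignment - 1", e.g. 511 for 512-byte alignment) follows the existing blk_queue_dma_alignment() usage.

#include <linux/blkdev.h>

/*
 * Hypothetical low-level driver setup: the controller wants buffers
 * 4-byte aligned for direct DMA, so it requests mask 0x3.
 */
static void example_lld_init_queue(struct request_queue *q)
{
	blk_queue_update_dma_alignment(q, 0x3);
}

/*
 * Hypothetical transport setup running later: it needs 512-byte
 * alignment (mask 511).  Since 511 > 3, the queue's dma_alignment
 * is raised to 511.
 */
static void example_transport_init_queue(struct request_queue *q)
{
	blk_queue_update_dma_alignment(q, 511);
}

/*
 * Running the two setup functions in the opposite order gives the
 * same result: dma_alignment ends up as 511, the larger of the two
 * masks, so neither layer's constraint is silently lost.
 */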