author    Martin K. Petersen <martin.petersen@oracle.com>    2012-09-18 12:19:27 -0400
committer Jens Axboe <axboe@kernel.dk>                       2012-09-20 08:31:45 -0400
commit    4363ac7c13a9a4b763c6e8d9fdbfc2468f3b8ca4
tree      010b05699eb9544b9cdfe5e1b3affdaea80132e7 /include/linux/blkdev.h
parent    f31dc1cd490539e2b62a126bc4dc2495b165d772
block: Implement support for WRITE SAME
The WRITE SAME command supported on some SCSI devices allows the same
block to be efficiently replicated throughout a block range. Only a
single logical block is transferred from the host and the storage device
writes the same data to all blocks described by the I/O.
This patch implements support for WRITE SAME in the block layer. The
blkdev_issue_write_same() function can be used by filesystems and block
drivers to replicate a buffer across a block range. This can be used to
efficiently initialize software RAID devices, etc.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
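As a hedged illustration of the new interface, the sketch below shows how a filesystem or RAID initializer might use blkdev_issue_write_same() once this patch is applied. Only blkdev_issue_write_same() and bdev_write_same() come from the patch itself; the wrapper function, its name, and the zero-fill use case are assumptions for illustration.

```c
/*
 * Hypothetical caller (not part of this patch): replicate one page of
 * zeroes across nr_sects sectors starting at the given sector.
 */
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/mm.h>		/* ZERO_PAGE() */

static int example_zero_range(struct block_device *bdev,
			      sector_t sector, sector_t nr_sects)
{
	/* bdev_write_same() returns 0 when the device lacks support. */
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_write_same(bdev, sector, nr_sects,
				       GFP_KERNEL, ZERO_PAGE(0));
}
```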
Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 90f7abe8f183..1756001210d2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -270,6 +270,7 @@ struct queue_limits {
 	unsigned int		io_min;
 	unsigned int		io_opt;
 	unsigned int		max_discard_sectors;
+	unsigned int		max_write_same_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
 
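The new max_write_same_sectors limit lives alongside the discard limits in struct queue_limits, so stacking drivers (dm, md) will eventually need to reconcile it across components. That code is outside this header and this patch; the snippet below is only a sketch of the natural combination rule, mirroring how other per-command limits stack. The function name and the parameters t (top) and b (bottom) are hypothetical.

```c
/*
 * Sketch, not from this patch: a stacked device can serve WRITE SAME
 * requests no larger than its most restrictive component allows.
 */
#include <linux/blkdev.h>
#include <linux/kernel.h>	/* min() */

static void example_stack_write_same(struct queue_limits *t,
				     struct queue_limits *b)
{
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
}
```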
@@ -614,9 +615,20 @@ static inline bool blk_check_merge_flags(unsigned int flags1,
 	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
 		return false;
 
+	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+		return false;
+
 	return true;
 }
 
+static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+{
+	if (bio_data(a) == bio_data(b))
+		return true;
+
+	return false;
+}
+
 /*
  * q->prep_rq_fn return values
  */
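Two helpers change here. blk_check_merge_flags() now refuses to merge a WRITE SAME request with a non-WRITE SAME one, and blk_write_same_mergeable() encodes the stricter payload rule: two WRITE SAME bios may merge only if they carry the same data buffer, since the merged request still transfers a single logical block. The sketch below shows how a merge path might consult both helpers; example_rq_mergeable() is a hypothetical name, loosely modeled on the block core's merge checks rather than copied from them.

```c
/*
 * Illustrative merge check (hypothetical): flags must agree, and
 * WRITE SAME requests must additionally share the same payload.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static bool example_rq_mergeable(struct request *rq, struct bio *bio)
{
	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	if ((rq->cmd_flags & REQ_WRITE_SAME) &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}
```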
@@ -818,6 +830,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 	if (unlikely(cmd_flags & REQ_DISCARD))
 		return q->limits.max_discard_sectors;
 
+	if (unlikely(cmd_flags & REQ_WRITE_SAME))
+		return q->limits.max_write_same_sectors;
+
 	return q->limits.max_sectors;
 }
 
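After this hunk the per-command cap depends on the request type: discard, WRITE SAME, and ordinary I/O each consult their own limit. A caller building WRITE SAME bios therefore has to chunk a large range against that cap, which is broadly what blkdev_issue_write_same() must do internally. The helper below is a sketch under that assumption; its name is hypothetical.

```c
/*
 * Sketch: how many sectors of a remaining range can go into one
 * WRITE SAME bio; 0 means the device offers no support at all.
 */
#include <linux/blkdev.h>
#include <linux/kernel.h>	/* min_t() */

static unsigned int example_write_same_chunk(struct block_device *bdev,
					     sector_t nr_sects)
{
	unsigned int max = bdev_write_same(bdev);

	if (!max)
		return 0;

	return min_t(sector_t, nr_sects, max);
}
```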
@@ -886,6 +901,8 @@ extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
+extern void blk_queue_max_write_same_sectors(struct request_queue *q,
+		unsigned int max_write_same_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
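A driver opts in by advertising a non-zero limit at queue setup time, exactly as it would for discard. The call shape is sketched below; the 0xFFFF value is an arbitrary example, not something the patch prescribes. In practice a SCSI driver would derive the limit from the device, e.g. from the Block Limits VPD page.

```c
/*
 * Hypothetical queue setup: advertise WRITE SAME support. The limit
 * defaults to 0, which leaves the feature disabled.
 */
#include <linux/blkdev.h>

static void example_configure_queue(struct request_queue *q)
{
	blk_queue_max_write_same_sectors(q, 0xFFFF);
}
```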
@@ -1016,6 +1033,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask);
 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
@@ -1193,6 +1212,16 @@ static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
 	return queue_discard_zeroes_data(bdev_get_queue(bdev));
 }
 
+static inline unsigned int bdev_write_same(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q)
+		return q->limits.max_write_same_sectors;
+
+	return 0;
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;