summaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
authorMartin K. Petersen <martin.petersen@oracle.com>2012-09-18 12:19:27 -0400
committerJens Axboe <axboe@kernel.dk>2012-09-20 08:31:45 -0400
commit4363ac7c13a9a4b763c6e8d9fdbfc2468f3b8ca4 (patch)
tree010b05699eb9544b9cdfe5e1b3affdaea80132e7 /include/linux
parentf31dc1cd490539e2b62a126bc4dc2495b165d772 (diff)
block: Implement support for WRITE SAME
The WRITE SAME command supported on some SCSI devices allows the same block
to be efficiently replicated throughout a block range. Only a single logical
block is transferred from the host and the storage device writes the same
data to all blocks described by the I/O.

This patch implements support for WRITE SAME in the block layer. The
blkdev_issue_write_same() function can be used by filesystems and block
drivers to replicate a buffer across a block range. This can be used to
efficiently initialize software RAID devices, etc.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/bio.h3
-rw-r--r--include/linux/blk_types.h5
-rw-r--r--include/linux/blkdev.h29
3 files changed, 36 insertions(+), 1 deletion(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index e54305cacc98..820e7aaad4fd 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -399,6 +399,9 @@ static inline bool bio_is_rw(struct bio *bio)
 	if (!bio_has_data(bio))
 		return false;
 
+	if (bio->bi_rw & REQ_WRITE_SAME)
+		return false;
+
 	return true;
 }
 
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 1b229664f573..cdf11191e645 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -147,6 +147,7 @@ enum rq_flag_bits {
 	__REQ_PRIO,		/* boost priority in cfq */
 	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SECURE,		/* secure discard (used with __REQ_DISCARD) */
+	__REQ_WRITE_SAME,	/* write same block many times */
 
 	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
 	__REQ_FUA,		/* forced unit access */
@@ -185,13 +186,15 @@ enum rq_flag_bits {
 #define REQ_META		(1 << __REQ_META)
 #define REQ_PRIO		(1 << __REQ_PRIO)
 #define REQ_DISCARD		(1 << __REQ_DISCARD)
+#define REQ_WRITE_SAME		(1 << __REQ_WRITE_SAME)
 #define REQ_NOIDLE		(1 << __REQ_NOIDLE)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
 	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
-	 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+	 REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
+	 REQ_SECURE)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 /* This mask is used for both bio and request merge checking */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 90f7abe8f183..1756001210d2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -270,6 +270,7 @@ struct queue_limits {
 	unsigned int		io_min;
 	unsigned int		io_opt;
 	unsigned int		max_discard_sectors;
+	unsigned int		max_write_same_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
 
@@ -614,9 +615,20 @@ static inline bool blk_check_merge_flags(unsigned int flags1,
 	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
 		return false;
 
+	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+		return false;
+
 	return true;
 }
 
624static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
625{
626 if (bio_data(a) == bio_data(b))
627 return true;
628
629 return false;
630}
+
 /*
  * q->prep_rq_fn return values
  */
@@ -818,6 +830,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 	if (unlikely(cmd_flags & REQ_DISCARD))
 		return q->limits.max_discard_sectors;
 
+	if (unlikely(cmd_flags & REQ_WRITE_SAME))
+		return q->limits.max_write_same_sectors;
+
 	return q->limits.max_sectors;
 }
 
@@ -886,6 +901,8 @@ extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
+extern void blk_queue_max_write_same_sectors(struct request_queue *q,
+		unsigned int max_write_same_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
@@ -1016,6 +1033,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask);
 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
@@ -1193,6 +1212,16 @@ static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
 	return queue_discard_zeroes_data(bdev_get_queue(bdev));
 }
 
1215static inline unsigned int bdev_write_same(struct block_device *bdev)
1216{
1217 struct request_queue *q = bdev_get_queue(bdev);
1218
1219 if (q)
1220 return q->limits.max_write_same_sectors;
1221
1222 return 0;
1223}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;