 Documentation/ABI/testing/sysfs-block | 14
 block/blk-core.c                      | 14
 block/blk-lib.c                       | 74
 block/blk-merge.c                     |  9
 block/blk-settings.c                  | 16
 block/blk-sysfs.c                     | 13
 drivers/md/raid0.c                    |  1
 fs/bio.c                              |  9
 include/linux/bio.h                   |  3
 include/linux/blk_types.h             |  5
 include/linux/blkdev.h                | 29
 11 files changed, 181 insertions(+), 6 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index c1eb41cb9876..279da08f7541 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -206,3 +206,17 @@ Description:
 		when a discarded area is read the discard_zeroes_data
 		parameter will be set to one. Otherwise it will be 0 and
 		the result of reading a discarded area is undefined.
+
+What:		/sys/block/<disk>/queue/write_same_max_bytes
+Date:		January 2012
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		Some devices support a write same operation in which a
+		single data block can be written to a range of several
+		contiguous blocks on storage. This can be used to wipe
+		areas on disk or to initialize drives in a RAID
+		configuration. write_same_max_bytes indicates how many
+		bytes can be written in a single write same command. If
+		write_same_max_bytes is 0, write same is not supported
+		by the device.
+
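For illustration, write_same_max_bytes can be read like any other queue attribute. A minimal userspace sketch, assuming a disk named "sda" (the program and device name are illustrative, not part of this patch):

/*
 * Sketch: query write_same_max_bytes for a disk. A value of 0 means
 * the device does not support write same.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long max_bytes;
	FILE *f = fopen("/sys/block/sda/queue/write_same_max_bytes", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%llu", &max_bytes) == 1) {
		if (max_bytes)
			printf("write same: up to %llu bytes per command\n",
			       max_bytes);
		else
			printf("write same: not supported\n");
	}
	fclose(f);
	return 0;
}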
diff --git a/block/blk-core.c b/block/blk-core.c
index 33eded00c5b1..3b080541098e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1704,6 +1704,11 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
+	if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
+
 	/*
 	 * Various block parts want %current->io_context and lazy ioc
 	 * allocation ends up trading a lot of pain for a small amount of
@@ -1809,8 +1814,6 @@ EXPORT_SYMBOL(generic_make_request);
  */
 void submit_bio(int rw, struct bio *bio)
 {
-	int count = bio_sectors(bio);
-
 	bio->bi_rw |= rw;
 
 	/*
@@ -1818,6 +1821,13 @@ void submit_bio(int rw, struct bio *bio)
 	 * go through the normal accounting stuff before submission.
 	 */
 	if (bio_has_data(bio)) {
+		unsigned int count;
+
+		if (unlikely(rw & REQ_WRITE_SAME))
+			count = bdev_logical_block_size(bio->bi_bdev) >> 9;
+		else
+			count = bio_sectors(bio);
+
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
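The accounting change above matters because a WRITE SAME bio's bi_size describes the device-side extent, not the payload moved from memory: for example, a 1 MiB WRITE SAME on a device with 512-byte logical blocks transfers only one 512-byte block of data, so PGPGOUT is incremented by 1 sector rather than the 2048 sectors bio_sectors() would report.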
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 19cc761cacb2..a062543c58ac 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -130,6 +130,80 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 EXPORT_SYMBOL(blkdev_issue_discard);
 
 /**
+ * blkdev_issue_write_same - queue a write same operation
+ * @bdev:	target blockdev
+ * @sector:	start sector
+ * @nr_sects:	number of sectors to write
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @page:	page containing data to write
+ *
+ * Description:
+ *    Issue a write same request for the sectors in question.
+ */
+int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+			    sector_t nr_sects, gfp_t gfp_mask,
+			    struct page *page)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct request_queue *q = bdev_get_queue(bdev);
+	unsigned int max_write_same_sectors;
+	struct bio_batch bb;
+	struct bio *bio;
+	int ret = 0;
+
+	if (!q)
+		return -ENXIO;
+
+	max_write_same_sectors = q->limits.max_write_same_sectors;
+
+	if (max_write_same_sectors == 0)
+		return -EOPNOTSUPP;
+
+	atomic_set(&bb.done, 1);
+	bb.flags = 1 << BIO_UPTODATE;
+	bb.wait = &wait;
+
+	while (nr_sects) {
+		bio = bio_alloc(gfp_mask, 1);
+		if (!bio) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		bio->bi_sector = sector;
+		bio->bi_end_io = bio_batch_end_io;
+		bio->bi_bdev = bdev;
+		bio->bi_private = &bb;
+		bio->bi_vcnt = 1;
+		bio->bi_io_vec->bv_page = page;
+		bio->bi_io_vec->bv_offset = 0;
+		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
+
+		if (nr_sects > max_write_same_sectors) {
+			bio->bi_size = max_write_same_sectors << 9;
+			nr_sects -= max_write_same_sectors;
+			sector += max_write_same_sectors;
+		} else {
+			bio->bi_size = nr_sects << 9;
+			nr_sects = 0;
+		}
+
+		atomic_inc(&bb.done);
+		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
+	}
+
+	/* Wait for bios in-flight */
+	if (!atomic_dec_and_test(&bb.done))
+		wait_for_completion(&wait);
+
+	if (!test_bit(BIO_UPTODATE, &bb.flags))
+		ret = -ENOTSUPP;
+
+	return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_write_same);
+
+/**
  * blkdev_issue_zeroout - generate number of zero filled write bios
  * @bdev:	blockdev to issue
  * @sector:	start sector
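As a usage sketch of the new helper: a caller provides a single page holding the pattern (here one zeroed logical block) and the device replicates it across the range. The wrapper wipe_region() below is hypothetical, not part of this patch:

/*
 * Hypothetical caller (not part of this patch): zero out a range of a
 * block device using the new write same helper.
 */
static int wipe_region(struct block_device *bdev, sector_t start,
		       sector_t nr_sects)
{
	struct page *page;
	int ret;

	/* One zeroed page covers a logical block of up to PAGE_SIZE. */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	/* The device replicates this single block across the range. */
	ret = blkdev_issue_write_same(bdev, start, nr_sects,
				      GFP_KERNEL, page);

	__free_page(page);
	return ret;
}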
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 642b862608a1..936a110de0b9 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -419,6 +419,10 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	    || next->special)
 		return 0;
 
+	if (req->cmd_flags & REQ_WRITE_SAME &&
+	    !blk_write_same_mergeable(req->bio, next->bio))
+		return 0;
+
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
@@ -518,6 +522,11 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (bio_integrity(bio) != blk_integrity_rq(rq))
 		return false;
 
+	/* must be using the same buffer */
+	if (rq->cmd_flags & REQ_WRITE_SAME &&
+	    !blk_write_same_mergeable(rq->bio, bio))
+		return false;
+
 	return true;
 }
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 565a6786032f..779bb7646bcd 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -113,6 +113,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
 	lim->discard_granularity = 0;
 	lim->discard_alignment = 0;
@@ -144,6 +145,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 	lim->max_segments = USHRT_MAX;
 	lim->max_hw_sectors = UINT_MAX;
 	lim->max_sectors = UINT_MAX;
+	lim->max_write_same_sectors = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
 
@@ -286,6 +288,18 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
 /**
+ * blk_queue_max_write_same_sectors - set max sectors for a single write same
+ * @q:  the request queue for the device
+ * @max_write_same_sectors: maximum number of sectors to write per command
+ **/
+void blk_queue_max_write_same_sectors(struct request_queue *q,
+				      unsigned int max_write_same_sectors)
+{
+	q->limits.max_write_same_sectors = max_write_same_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
+
+/**
  * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
@@ -510,6 +524,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+	t->max_write_same_sectors = min(t->max_write_same_sectors,
+					b->max_write_same_sectors);
 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
 
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
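A sketch of how a low-level driver might opt in when it configures its queue; the function name and sector count below are hypothetical placeholders, not part of this patch:

/* Hypothetical driver snippet: advertise WRITE SAME support. */
#define MY_DEV_WRITE_SAME_MAX_SECTORS	0xffff	/* placeholder value */

static void my_dev_configure_queue(struct request_queue *q)
{
	/* The default of 0 means "write same not supported". */
	blk_queue_max_write_same_sectors(q, MY_DEV_WRITE_SAME_MAX_SECTORS);
}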
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index ea51d827a0bb..247dbfd42621 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -180,6 +180,13 @@ static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *pag
 	return queue_var_show(queue_discard_zeroes_data(q), page);
 }
 
+static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
+{
+	return sprintf(page, "%llu\n",
+		(unsigned long long)q->limits.max_write_same_sectors << 9);
+}
+
+
 static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
@@ -385,6 +392,11 @@ static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
 	.show = queue_discard_zeroes_data_show,
 };
 
+static struct queue_sysfs_entry queue_write_same_max_entry = {
+	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
+	.show = queue_write_same_max_show,
+};
+
 static struct queue_sysfs_entry queue_nonrot_entry = {
 	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_show_nonrot,
@@ -432,6 +444,7 @@ static struct attribute *default_attrs[] = {
 	&queue_discard_granularity_entry.attr,
 	&queue_discard_max_entry.attr,
 	&queue_discard_zeroes_data_entry.attr,
+	&queue_write_same_max_entry.attr,
 	&queue_nonrot_entry.attr,
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index de63a1fc3737..a9e4fa95dfaa 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -422,6 +422,7 @@ static int raid0_run(struct mddev *mddev)
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
 	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+	blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
 
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
diff --git a/fs/bio.c b/fs/bio.c
index 13e956779e10..f855e0e1869c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1487,9 +1487,12 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 
 	bp->bv1 = bi->bi_io_vec[0];
 	bp->bv2 = bi->bi_io_vec[0];
-	bp->bv2.bv_offset += first_sectors << 9;
-	bp->bv2.bv_len -= first_sectors << 9;
-	bp->bv1.bv_len = first_sectors << 9;
+
+	if (bio_is_rw(bi)) {
+		bp->bv2.bv_offset += first_sectors << 9;
+		bp->bv2.bv_len -= first_sectors << 9;
+		bp->bv1.bv_len = first_sectors << 9;
+	}
 
 	bp->bio1.bi_io_vec = &bp->bv1;
 	bp->bio2.bi_io_vec = &bp->bv2;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index e54305cacc98..820e7aaad4fd 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -399,6 +399,9 @@ static inline bool bio_is_rw(struct bio *bio)
 	if (!bio_has_data(bio))
 		return false;
 
+	if (bio->bi_rw & REQ_WRITE_SAME)
+		return false;
+
 	return true;
 }
 
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 1b229664f573..cdf11191e645 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -147,6 +147,7 @@ enum rq_flag_bits {
 	__REQ_PRIO,		/* boost priority in cfq */
 	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SECURE,		/* secure discard (used with __REQ_DISCARD) */
+	__REQ_WRITE_SAME,	/* write same block many times */
 
 	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
 	__REQ_FUA,		/* forced unit access */
@@ -185,13 +186,15 @@ enum rq_flag_bits {
 #define REQ_META		(1 << __REQ_META)
 #define REQ_PRIO		(1 << __REQ_PRIO)
 #define REQ_DISCARD		(1 << __REQ_DISCARD)
+#define REQ_WRITE_SAME		(1 << __REQ_WRITE_SAME)
 #define REQ_NOIDLE		(1 << __REQ_NOIDLE)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
 	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
-	 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+	 REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
+	 REQ_SECURE)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 /* This mask is used for both bio and request merge checking */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 90f7abe8f183..1756001210d2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -270,6 +270,7 @@ struct queue_limits {
 	unsigned int		io_min;
 	unsigned int		io_opt;
 	unsigned int		max_discard_sectors;
+	unsigned int		max_write_same_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
 
@@ -614,9 +615,20 @@ static inline bool blk_check_merge_flags(unsigned int flags1,
 	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
 		return false;
 
+	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+		return false;
+
 	return true;
 }
 
+static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+{
+	if (bio_data(a) == bio_data(b))
+		return true;
+
+	return false;
+}
+
 /*
  * q->prep_rq_fn return values
  */
@@ -818,6 +830,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 	if (unlikely(cmd_flags & REQ_DISCARD))
 		return q->limits.max_discard_sectors;
 
+	if (unlikely(cmd_flags & REQ_WRITE_SAME))
+		return q->limits.max_write_same_sectors;
+
 	return q->limits.max_sectors;
 }
 
@@ -886,6 +901,8 @@ extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
+extern void blk_queue_max_write_same_sectors(struct request_queue *q,
+		unsigned int max_write_same_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
@@ -1016,6 +1033,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask);
 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
@@ -1193,6 +1212,16 @@ static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
 	return queue_discard_zeroes_data(bdev_get_queue(bdev));
 }
 
+static inline unsigned int bdev_write_same(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q)
+		return q->limits.max_write_same_sectors;
+
+	return 0;
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
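Putting the pieces together, an in-kernel caller can use bdev_write_same() as a cheap capability probe before issuing the command; a sketch, with try_write_same() as an illustrative name rather than an interface added by this patch:

/*
 * Illustrative pattern (not part of this patch): probe for WRITE SAME
 * support, then issue the command or return -EOPNOTSUPP so the caller
 * can fall back to regular writes.
 */
static int try_write_same(struct block_device *bdev, sector_t sector,
			  sector_t nr_sects, struct page *page)
{
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_write_same(bdev, sector, nr_sects,
				       GFP_KERNEL, page);
}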