aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-01-14 20:07:04 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-01-14 20:07:04 -0500
commit34241af77b8696120a9735bb2579ec7044199a8b (patch)
tree79f83b31abcd58b49521136a937d39aba45739b7
parentf0ad17712b9f71c24e2b8b9725230ef57232377f (diff)
parentbef13315e990fd3d3fb4c39013aefd53f06c3657 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe: - the virtio_blk stack DMA corruption fix from Christoph, fixing an issue with VMAP stacks. - O_DIRECT blkbits calculation fix from Chandan. - discard regression fix from Christoph. - queue init error handling fixes for nbd and virtio_blk, from Omar and Jeff. - two small nvme fixes, from Christoph and Guilherme. - rename of blk_queue_zone_size and bdev_zone_size to _sectors instead, to more closely follow what we do in other places in the block layer. This interface is new for this series, so let's get the naming right before releasing a kernel with this feature. From Damien. * 'for-linus' of git://git.kernel.dk/linux-block: block: don't try to discard from __blkdev_issue_zeroout sd: remove __data_len hack for WRITE SAME nvme: use blk_rq_payload_bytes scsi: use blk_rq_payload_bytes block: add blk_rq_payload_bytes block: Rename blk_queue_zone_size and bdev_zone_size nvme: apply DELAY_BEFORE_CHK_RDY quirk at probe time too nvme-rdma: fix nvme_rdma_queue_is_ready virtio_blk: fix panic in initialization error path nbd: blk_mq_init_queue returns an error code on failure, not NULL virtio_blk: avoid DMA to stack for the sense buffer do_direct_IO: Use inode->i_blkbits to compute block count to be cleaned
-rw-r--r--block/blk-lib.c13
-rw-r--r--block/blk-zoned.c4
-rw-r--r--block/partition-generic.c14
-rw-r--r--drivers/block/nbd.c6
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/nvme/host/core.c7
-rw-r--r--drivers/nvme/host/fc.c5
-rw-r--r--drivers/nvme/host/nvme.h8
-rw-r--r--drivers/nvme/host/pci.c19
-rw-r--r--drivers/nvme/host/rdma.c15
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/sd.c17
-rw-r--r--fs/direct-io.c3
-rw-r--r--fs/f2fs/segment.c4
-rw-r--r--fs/f2fs/super.c6
-rw-r--r--include/linux/blkdev.h19
16 files changed, 66 insertions, 83 deletions
diff --git a/block/blk-lib.c b/block/blk-lib.c
index ed89c8f4b2a0..f8c82a9b4012 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -301,13 +301,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
301 if ((sector | nr_sects) & bs_mask) 301 if ((sector | nr_sects) & bs_mask)
302 return -EINVAL; 302 return -EINVAL;
303 303
304 if (discard) {
305 ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
306 BLKDEV_DISCARD_ZERO, biop);
307 if (ret == 0 || (ret && ret != -EOPNOTSUPP))
308 goto out;
309 }
310
311 ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, 304 ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
312 biop); 305 biop);
313 if (ret == 0 || (ret && ret != -EOPNOTSUPP)) 306 if (ret == 0 || (ret && ret != -EOPNOTSUPP))
@@ -370,6 +363,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
370 struct bio *bio = NULL; 363 struct bio *bio = NULL;
371 struct blk_plug plug; 364 struct blk_plug plug;
372 365
366 if (discard) {
367 if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
368 BLKDEV_DISCARD_ZERO))
369 return 0;
370 }
371
373 blk_start_plug(&plug); 372 blk_start_plug(&plug);
374 ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, 373 ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
375 &bio, discard); 374 &bio, discard);
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 472211fa183a..3bd15d8095b1 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -16,7 +16,7 @@
16static inline sector_t blk_zone_start(struct request_queue *q, 16static inline sector_t blk_zone_start(struct request_queue *q,
17 sector_t sector) 17 sector_t sector)
18{ 18{
19 sector_t zone_mask = blk_queue_zone_size(q) - 1; 19 sector_t zone_mask = blk_queue_zone_sectors(q) - 1;
20 20
21 return sector & ~zone_mask; 21 return sector & ~zone_mask;
22} 22}
@@ -222,7 +222,7 @@ int blkdev_reset_zones(struct block_device *bdev,
222 return -EINVAL; 222 return -EINVAL;
223 223
224 /* Check alignment (handle eventual smaller last zone) */ 224 /* Check alignment (handle eventual smaller last zone) */
225 zone_sectors = blk_queue_zone_size(q); 225 zone_sectors = blk_queue_zone_sectors(q);
226 if (sector & (zone_sectors - 1)) 226 if (sector & (zone_sectors - 1))
227 return -EINVAL; 227 return -EINVAL;
228 228
diff --git a/block/partition-generic.c b/block/partition-generic.c
index d7beb6bbbf66..7afb9907821f 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -434,7 +434,7 @@ static bool part_zone_aligned(struct gendisk *disk,
434 struct block_device *bdev, 434 struct block_device *bdev,
435 sector_t from, sector_t size) 435 sector_t from, sector_t size)
436{ 436{
437 unsigned int zone_size = bdev_zone_size(bdev); 437 unsigned int zone_sectors = bdev_zone_sectors(bdev);
438 438
439 /* 439 /*
440 * If this function is called, then the disk is a zoned block device 440 * If this function is called, then the disk is a zoned block device
@@ -446,7 +446,7 @@ static bool part_zone_aligned(struct gendisk *disk,
446 * regular block devices (no zone operation) and their zone size will 446 * regular block devices (no zone operation) and their zone size will
447 * be reported as 0. Allow this case. 447 * be reported as 0. Allow this case.
448 */ 448 */
449 if (!zone_size) 449 if (!zone_sectors)
450 return true; 450 return true;
451 451
452 /* 452 /*
@@ -455,24 +455,24 @@ static bool part_zone_aligned(struct gendisk *disk,
455 * use it. Check the zone size too: it should be a power of 2 number 455 * use it. Check the zone size too: it should be a power of 2 number
456 * of sectors. 456 * of sectors.
457 */ 457 */
458 if (WARN_ON_ONCE(!is_power_of_2(zone_size))) { 458 if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
459 u32 rem; 459 u32 rem;
460 460
461 div_u64_rem(from, zone_size, &rem); 461 div_u64_rem(from, zone_sectors, &rem);
462 if (rem) 462 if (rem)
463 return false; 463 return false;
464 if ((from + size) < get_capacity(disk)) { 464 if ((from + size) < get_capacity(disk)) {
465 div_u64_rem(size, zone_size, &rem); 465 div_u64_rem(size, zone_sectors, &rem);
466 if (rem) 466 if (rem)
467 return false; 467 return false;
468 } 468 }
469 469
470 } else { 470 } else {
471 471
472 if (from & (zone_size - 1)) 472 if (from & (zone_sectors - 1))
473 return false; 473 return false;
474 if ((from + size) < get_capacity(disk) && 474 if ((from + size) < get_capacity(disk) &&
475 (size & (zone_size - 1))) 475 (size & (zone_sectors - 1)))
476 return false; 476 return false;
477 477
478 } 478 }
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 38c576f76d36..50a2020b5b72 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1042,6 +1042,7 @@ static int __init nbd_init(void)
1042 return -ENOMEM; 1042 return -ENOMEM;
1043 1043
1044 for (i = 0; i < nbds_max; i++) { 1044 for (i = 0; i < nbds_max; i++) {
1045 struct request_queue *q;
1045 struct gendisk *disk = alloc_disk(1 << part_shift); 1046 struct gendisk *disk = alloc_disk(1 << part_shift);
1046 if (!disk) 1047 if (!disk)
1047 goto out; 1048 goto out;
@@ -1067,12 +1068,13 @@ static int __init nbd_init(void)
1067 * every gendisk to have its very own request_queue struct. 1068 * every gendisk to have its very own request_queue struct.
1068 * These structs are big so we dynamically allocate them. 1069 * These structs are big so we dynamically allocate them.
1069 */ 1070 */
1070 disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set); 1071 q = blk_mq_init_queue(&nbd_dev[i].tag_set);
1071 if (!disk->queue) { 1072 if (IS_ERR(q)) {
1072 blk_mq_free_tag_set(&nbd_dev[i].tag_set); 1073 blk_mq_free_tag_set(&nbd_dev[i].tag_set);
1073 put_disk(disk); 1074 put_disk(disk);
1074 goto out; 1075 goto out;
1075 } 1076 }
1077 disk->queue = q;
1076 1078
1077 /* 1079 /*
1078 * Tell the block layer that we are not a rotational device 1080 * Tell the block layer that we are not a rotational device
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5545a679abd8..10332c24f961 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -56,6 +56,7 @@ struct virtblk_req {
56 struct virtio_blk_outhdr out_hdr; 56 struct virtio_blk_outhdr out_hdr;
57 struct virtio_scsi_inhdr in_hdr; 57 struct virtio_scsi_inhdr in_hdr;
58 u8 status; 58 u8 status;
59 u8 sense[SCSI_SENSE_BUFFERSIZE];
59 struct scatterlist sg[]; 60 struct scatterlist sg[];
60}; 61};
61 62
@@ -102,7 +103,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
102 } 103 }
103 104
104 if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) { 105 if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
105 sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE); 106 memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
107 sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
106 sgs[num_out + num_in++] = &sense; 108 sgs[num_out + num_in++] = &sense;
107 sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr)); 109 sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
108 sgs[num_out + num_in++] = &inhdr; 110 sgs[num_out + num_in++] = &inhdr;
@@ -628,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
628 if (err) 630 if (err)
629 goto out_put_disk; 631 goto out_put_disk;
630 632
631 q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); 633 q = blk_mq_init_queue(&vblk->tag_set);
632 if (IS_ERR(q)) { 634 if (IS_ERR(q)) {
633 err = -ENOMEM; 635 err = -ENOMEM;
634 goto out_free_tags; 636 goto out_free_tags;
635 } 637 }
638 vblk->disk->queue = q;
636 639
637 q->queuedata = vblk; 640 q->queuedata = vblk;
638 641
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2fc86dc7a8df..8a3c3e32a704 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1106,12 +1106,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
1106 if (ret) 1106 if (ret)
1107 return ret; 1107 return ret;
1108 1108
1109 /* Checking for ctrl->tagset is a trick to avoid sleeping on module 1109 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
1110 * load, since we only need the quirk on reset_controller. Notice
1111 * that the HGST device needs this delay only in firmware activation
1112 * procedure; unfortunately we have no (easy) way to verify this.
1113 */
1114 if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
1115 msleep(NVME_QUIRK_DELAY_AMOUNT); 1110 msleep(NVME_QUIRK_DELAY_AMOUNT);
1116 1111
1117 return nvme_wait_ready(ctrl, cap, false); 1112 return nvme_wait_ready(ctrl, cap, false);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index aa0bc60810a7..fcc9dcfdf675 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1654,13 +1654,12 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1654 struct nvme_fc_fcp_op *op) 1654 struct nvme_fc_fcp_op *op)
1655{ 1655{
1656 struct nvmefc_fcp_req *freq = &op->fcp_req; 1656 struct nvmefc_fcp_req *freq = &op->fcp_req;
1657 u32 map_len = nvme_map_len(rq);
1658 enum dma_data_direction dir; 1657 enum dma_data_direction dir;
1659 int ret; 1658 int ret;
1660 1659
1661 freq->sg_cnt = 0; 1660 freq->sg_cnt = 0;
1662 1661
1663 if (!map_len) 1662 if (!blk_rq_payload_bytes(rq))
1664 return 0; 1663 return 0;
1665 1664
1666 freq->sg_table.sgl = freq->first_sgl; 1665 freq->sg_table.sgl = freq->first_sgl;
@@ -1854,7 +1853,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
1854 if (ret) 1853 if (ret)
1855 return ret; 1854 return ret;
1856 1855
1857 data_len = nvme_map_len(rq); 1856 data_len = blk_rq_payload_bytes(rq);
1858 if (data_len) 1857 if (data_len)
1859 io_dir = ((rq_data_dir(rq) == WRITE) ? 1858 io_dir = ((rq_data_dir(rq) == WRITE) ?
1860 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); 1859 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 6377e14586dc..aead6d08ed2c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -225,14 +225,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
225 return (sector >> (ns->lba_shift - 9)); 225 return (sector >> (ns->lba_shift - 9));
226} 226}
227 227
228static inline unsigned nvme_map_len(struct request *rq)
229{
230 if (req_op(rq) == REQ_OP_DISCARD)
231 return sizeof(struct nvme_dsm_range);
232 else
233 return blk_rq_bytes(rq);
234}
235
236static inline void nvme_cleanup_cmd(struct request *req) 228static inline void nvme_cleanup_cmd(struct request *req)
237{ 229{
238 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { 230 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 19beeb7b2ac2..3faefabf339c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -306,11 +306,11 @@ static __le64 **iod_list(struct request *req)
306 return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req)); 306 return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
307} 307}
308 308
309static int nvme_init_iod(struct request *rq, unsigned size, 309static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
310 struct nvme_dev *dev)
311{ 310{
312 struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); 311 struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
313 int nseg = blk_rq_nr_phys_segments(rq); 312 int nseg = blk_rq_nr_phys_segments(rq);
313 unsigned int size = blk_rq_payload_bytes(rq);
314 314
315 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { 315 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
316 iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC); 316 iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -420,12 +420,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
420} 420}
421#endif 421#endif
422 422
423static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req, 423static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
424 int total_len)
425{ 424{
426 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 425 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
427 struct dma_pool *pool; 426 struct dma_pool *pool;
428 int length = total_len; 427 int length = blk_rq_payload_bytes(req);
429 struct scatterlist *sg = iod->sg; 428 struct scatterlist *sg = iod->sg;
430 int dma_len = sg_dma_len(sg); 429 int dma_len = sg_dma_len(sg);
431 u64 dma_addr = sg_dma_address(sg); 430 u64 dma_addr = sg_dma_address(sg);
@@ -501,7 +500,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
501} 500}
502 501
503static int nvme_map_data(struct nvme_dev *dev, struct request *req, 502static int nvme_map_data(struct nvme_dev *dev, struct request *req,
504 unsigned size, struct nvme_command *cmnd) 503 struct nvme_command *cmnd)
505{ 504{
506 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 505 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
507 struct request_queue *q = req->q; 506 struct request_queue *q = req->q;
@@ -519,7 +518,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
519 DMA_ATTR_NO_WARN)) 518 DMA_ATTR_NO_WARN))
520 goto out; 519 goto out;
521 520
522 if (!nvme_setup_prps(dev, req, size)) 521 if (!nvme_setup_prps(dev, req))
523 goto out_unmap; 522 goto out_unmap;
524 523
525 ret = BLK_MQ_RQ_QUEUE_ERROR; 524 ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -580,7 +579,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
580 struct nvme_dev *dev = nvmeq->dev; 579 struct nvme_dev *dev = nvmeq->dev;
581 struct request *req = bd->rq; 580 struct request *req = bd->rq;
582 struct nvme_command cmnd; 581 struct nvme_command cmnd;
583 unsigned map_len;
584 int ret = BLK_MQ_RQ_QUEUE_OK; 582 int ret = BLK_MQ_RQ_QUEUE_OK;
585 583
586 /* 584 /*
@@ -600,13 +598,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
600 if (ret != BLK_MQ_RQ_QUEUE_OK) 598 if (ret != BLK_MQ_RQ_QUEUE_OK)
601 return ret; 599 return ret;
602 600
603 map_len = nvme_map_len(req); 601 ret = nvme_init_iod(req, dev);
604 ret = nvme_init_iod(req, map_len, dev);
605 if (ret != BLK_MQ_RQ_QUEUE_OK) 602 if (ret != BLK_MQ_RQ_QUEUE_OK)
606 goto out_free_cmd; 603 goto out_free_cmd;
607 604
608 if (blk_rq_nr_phys_segments(req)) 605 if (blk_rq_nr_phys_segments(req))
609 ret = nvme_map_data(dev, req, map_len, &cmnd); 606 ret = nvme_map_data(dev, req, &cmnd);
610 607
611 if (ret != BLK_MQ_RQ_QUEUE_OK) 608 if (ret != BLK_MQ_RQ_QUEUE_OK)
612 goto out_cleanup_iod; 609 goto out_cleanup_iod;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f587af345889..557f29b1f1bb 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -981,8 +981,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
981} 981}
982 982
983static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, 983static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
984 struct request *rq, unsigned int map_len, 984 struct request *rq, struct nvme_command *c)
985 struct nvme_command *c)
986{ 985{
987 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 986 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
988 struct nvme_rdma_device *dev = queue->device; 987 struct nvme_rdma_device *dev = queue->device;
@@ -1014,9 +1013,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
1014 } 1013 }
1015 1014
1016 if (count == 1) { 1015 if (count == 1) {
1017 if (rq_data_dir(rq) == WRITE && 1016 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
1018 map_len <= nvme_rdma_inline_data_size(queue) && 1017 blk_rq_payload_bytes(rq) <=
1019 nvme_rdma_queue_idx(queue)) 1018 nvme_rdma_inline_data_size(queue))
1020 return nvme_rdma_map_sg_inline(queue, req, c); 1019 return nvme_rdma_map_sg_inline(queue, req, c);
1021 1020
1022 if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) 1021 if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
@@ -1422,7 +1421,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
1422 struct request *rq) 1421 struct request *rq)
1423{ 1422{
1424 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { 1423 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
1425 struct nvme_command *cmd = (struct nvme_command *)rq->cmd; 1424 struct nvme_command *cmd = nvme_req(rq)->cmd;
1426 1425
1427 if (rq->cmd_type != REQ_TYPE_DRV_PRIV || 1426 if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
1428 cmd->common.opcode != nvme_fabrics_command || 1427 cmd->common.opcode != nvme_fabrics_command ||
@@ -1444,7 +1443,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1444 struct nvme_command *c = sqe->data; 1443 struct nvme_command *c = sqe->data;
1445 bool flush = false; 1444 bool flush = false;
1446 struct ib_device *dev; 1445 struct ib_device *dev;
1447 unsigned int map_len;
1448 int ret; 1446 int ret;
1449 1447
1450 WARN_ON_ONCE(rq->tag < 0); 1448 WARN_ON_ONCE(rq->tag < 0);
@@ -1462,8 +1460,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1462 1460
1463 blk_mq_start_request(rq); 1461 blk_mq_start_request(rq);
1464 1462
1465 map_len = nvme_map_len(rq); 1463 ret = nvme_rdma_map_data(queue, rq, c);
1466 ret = nvme_rdma_map_data(queue, rq, map_len, c);
1467 if (ret < 0) { 1464 if (ret < 0) {
1468 dev_err(queue->ctrl->ctrl.device, 1465 dev_err(queue->ctrl->ctrl.device,
1469 "Failed to map data (%d)\n", ret); 1466 "Failed to map data (%d)\n", ret);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9fd9a977c695..e9e1e141af9c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1018,7 +1018,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
1018 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 1018 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
1019 BUG_ON(count > sdb->table.nents); 1019 BUG_ON(count > sdb->table.nents);
1020 sdb->table.nents = count; 1020 sdb->table.nents = count;
1021 sdb->length = blk_rq_bytes(req); 1021 sdb->length = blk_rq_payload_bytes(req);
1022 return BLKPREP_OK; 1022 return BLKPREP_OK;
1023} 1023}
1024 1024
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b1933041da39..1fbb1ecf49f2 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -836,7 +836,6 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
836 struct bio *bio = rq->bio; 836 struct bio *bio = rq->bio;
837 sector_t sector = blk_rq_pos(rq); 837 sector_t sector = blk_rq_pos(rq);
838 unsigned int nr_sectors = blk_rq_sectors(rq); 838 unsigned int nr_sectors = blk_rq_sectors(rq);
839 unsigned int nr_bytes = blk_rq_bytes(rq);
840 int ret; 839 int ret;
841 840
842 if (sdkp->device->no_write_same) 841 if (sdkp->device->no_write_same)
@@ -869,21 +868,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
869 868
870 cmd->transfersize = sdp->sector_size; 869 cmd->transfersize = sdp->sector_size;
871 cmd->allowed = SD_MAX_RETRIES; 870 cmd->allowed = SD_MAX_RETRIES;
872 871 return scsi_init_io(cmd);
873 /*
874 * For WRITE_SAME the data transferred in the DATA IN buffer is
875 * different from the amount of data actually written to the target.
876 *
877 * We set up __data_len to the amount of data transferred from the
878 * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list
879 * to transfer a single sector of data first, but then reset it to
880 * the amount of data to be written right after so that the I/O path
881 * knows how much to actually write.
882 */
883 rq->__data_len = sdp->sector_size;
884 ret = scsi_init_io(cmd);
885 rq->__data_len = nr_bytes;
886 return ret;
887} 872}
888 873
889static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd) 874static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index aeae8c063451..c87bae4376b8 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -906,6 +906,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
906 struct buffer_head *map_bh) 906 struct buffer_head *map_bh)
907{ 907{
908 const unsigned blkbits = sdio->blkbits; 908 const unsigned blkbits = sdio->blkbits;
909 const unsigned i_blkbits = blkbits + sdio->blkfactor;
909 int ret = 0; 910 int ret = 0;
910 911
911 while (sdio->block_in_file < sdio->final_block_in_request) { 912 while (sdio->block_in_file < sdio->final_block_in_request) {
@@ -949,7 +950,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
949 clean_bdev_aliases( 950 clean_bdev_aliases(
950 map_bh->b_bdev, 951 map_bh->b_bdev,
951 map_bh->b_blocknr, 952 map_bh->b_blocknr,
952 map_bh->b_size >> blkbits); 953 map_bh->b_size >> i_blkbits);
953 } 954 }
954 955
955 if (!sdio->blkfactor) 956 if (!sdio->blkfactor)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 0738f48293cc..0d8802453758 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -713,8 +713,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
713 } 713 }
714 sector = SECTOR_FROM_BLOCK(blkstart); 714 sector = SECTOR_FROM_BLOCK(blkstart);
715 715
716 if (sector & (bdev_zone_size(bdev) - 1) || 716 if (sector & (bdev_zone_sectors(bdev) - 1) ||
717 nr_sects != bdev_zone_size(bdev)) { 717 nr_sects != bdev_zone_sectors(bdev)) {
718 f2fs_msg(sbi->sb, KERN_INFO, 718 f2fs_msg(sbi->sb, KERN_INFO,
719 "(%d) %s: Unaligned discard attempted (block %x + %x)", 719 "(%d) %s: Unaligned discard attempted (block %x + %x)",
720 devi, sbi->s_ndevs ? FDEV(devi).path: "", 720 devi, sbi->s_ndevs ? FDEV(devi).path: "",
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 702638e21c76..46fd30d8af77 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1553,16 +1553,16 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
1553 return 0; 1553 return 0;
1554 1554
1555 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz != 1555 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
1556 SECTOR_TO_BLOCK(bdev_zone_size(bdev))) 1556 SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
1557 return -EINVAL; 1557 return -EINVAL;
1558 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev)); 1558 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
1559 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz != 1559 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
1560 __ilog2_u32(sbi->blocks_per_blkz)) 1560 __ilog2_u32(sbi->blocks_per_blkz))
1561 return -EINVAL; 1561 return -EINVAL;
1562 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz); 1562 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
1563 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >> 1563 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
1564 sbi->log_blocks_per_blkz; 1564 sbi->log_blocks_per_blkz;
1565 if (nr_sectors & (bdev_zone_size(bdev) - 1)) 1565 if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
1566 FDEV(devi).nr_blkz++; 1566 FDEV(devi).nr_blkz++;
1567 1567
1568 FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL); 1568 FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 83695641bd5e..1ca8e8fd1078 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -739,7 +739,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
739 } 739 }
740} 740}
741 741
742static inline unsigned int blk_queue_zone_size(struct request_queue *q) 742static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
743{ 743{
744 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; 744 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
745} 745}
@@ -1000,6 +1000,19 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1000 return blk_rq_cur_bytes(rq) >> 9; 1000 return blk_rq_cur_bytes(rq) >> 9;
1001} 1001}
1002 1002
1003/*
1004 * Some commands like WRITE SAME have a payload or data transfer size which
1005 * is different from the size of the request. Any driver that supports such
1006 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
1007 * calculate the data transfer size.
1008 */
1009static inline unsigned int blk_rq_payload_bytes(struct request *rq)
1010{
1011 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1012 return rq->special_vec.bv_len;
1013 return blk_rq_bytes(rq);
1014}
1015
1003static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, 1016static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
1004 int op) 1017 int op)
1005{ 1018{
@@ -1536,12 +1549,12 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
1536 return false; 1549 return false;
1537} 1550}
1538 1551
1539static inline unsigned int bdev_zone_size(struct block_device *bdev) 1552static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
1540{ 1553{
1541 struct request_queue *q = bdev_get_queue(bdev); 1554 struct request_queue *q = bdev_get_queue(bdev);
1542 1555
1543 if (q) 1556 if (q)
1544 return blk_queue_zone_size(q); 1557 return blk_queue_zone_sectors(q);
1545 1558
1546 return 0; 1559 return 0;
1547} 1560}