path: root/drivers
author	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-01 17:46:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-01 17:46:38 -0400
commit	1cc15701cd89b0ce695bbc5cff3a2bf3e2efd25f (patch)
tree	87202cdec2f83195e93ffe3c8e443ae405b7ccf8 /drivers
parent	4f2ba5dc183b71362c3655b50c72f1b10ccac1c1 (diff)
parent	79d73346ac05bc31f2e96f899c4e9aaaa616a8d4 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A few fixes that should go into this series:

  - Regression fix for ide-cd, ensuring that a request is fully
    initialized. From Hongxu.

  - Ditto fix for virtio_blk, from Bart.

  - NVMe fix from Keith, ensuring that we set the right block size on
    revalidation. If the block size changed, we'd be in trouble
    without it.

  - NVMe rdma fix from Sagi, fixing a potential hang while the
    controller is being removed"

* 'for-linus' of git://git.kernel.dk/linux-block:
  ide:ide-cd: fix kernel panic resulting from missing scsi_req_init
  nvme: Fix setting logical block format when revalidating
  virtio_blk: Fix an SG_IO regression
  nvme-rdma: fix possible hang when issuing commands during ctrl removal
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/virtio_blk.c	12
-rw-r--r--	drivers/ide/ide-cd.c	1
-rw-r--r--	drivers/nvme/host/core.c	1
-rw-r--r--	drivers/nvme/host/rdma.c	11
4 files changed, 21 insertions(+), 4 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 34e17ee799be..68846897d213 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -593,10 +593,22 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
 	return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
 }
 
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+static void virtblk_initialize_rq(struct request *req)
+{
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+	scsi_req_init(&vbr->sreq);
+}
+#endif
+
 static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
 	.complete	= virtblk_request_done,
 	.init_request	= virtblk_init_request,
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+	.initialize_rq_fn = virtblk_initialize_rq,
+#endif
 	.map_queues	= virtblk_map_queues,
 };
 
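The hunk above wires blk-mq's initialize_rq_fn hook so the scsi_request embedded in each virtio_blk request is re-initialized before every passthrough command. The regression was visible through the SG_IO ioctl, so here is a minimal user-space sketch of that path; the device path is an assumption, not part of the patch:

/*
 * Sketch: issue a TEST UNIT READY through the virtio_blk SCSI
 * passthrough interface. Without the initialize_rq_fn hook above,
 * the embedded scsi_request was left uninitialized on this path.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x00, 0, 0, 0, 0, 0 };	/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/vda", O_RDONLY);	/* hypothetical virtio-blk device */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.dxfer_direction = SG_DXFER_NONE;
	hdr.timeout = 5000;	/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("status: 0x%x\n", hdr.status);
	return 0;
}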
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 81e18f9628d0..a7355ab3bb22 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1328,6 +1328,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 	unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
 	struct scsi_request *req = scsi_req(rq);
 
+	scsi_req_init(req);
 	memset(req->cmd, 0, BLK_MAX_CDB);
 
 	if (rq_data_dir(rq) == READ)
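The panic came from ide_cdrom_prep_fs() writing through a scsi_request that was never initialized. Roughly what scsi_req_init() does in this era of the tree, paraphrased for illustration rather than copied from upstream:

/*
 * scsi_req_init() (approximate): reset the embedded scsi_request so
 * req->cmd points at a zeroed internal CDB buffer. Skipping it left
 * the memset() in the hunk above writing through a stale req->cmd
 * pointer, hence the kernel panic named in the commit subject.
 */
void scsi_req_init(struct scsi_request *req)
{
	memset(req->__cmd, 0, sizeof(req->__cmd));
	req->cmd = req->__cmd;
	req->cmd_len = BLK_MAX_CDB;
	req->sense_len = 0;
}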
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5a14cc7f28ee..37f9039bb9ca 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1249,6 +1249,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 		goto out;
 	}
 
+	__nvme_revalidate_disk(disk, id);
 	nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid);
 	if (!uuid_equal(&ns->uuid, &uuid) ||
 	    memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) ||
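Calling __nvme_revalidate_disk() here re-applies the logical block format reported by the Identify data, so a namespace reformatted to a different LBA size is picked up. A minimal user-space sketch (device path assumed) of how to observe the geometry the kernel believes, using the standard BLKSSZGET ioctl:

/*
 * Sketch: read the logical block size the block layer currently has
 * for a namespace. After an "nvme format" that changes the LBA size,
 * revalidation must update this value or I/O would be issued with
 * stale geometry.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	int lbs = 0;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* hypothetical namespace */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKSSZGET, &lbs) == 0)
		printf("logical block size: %d\n", lbs);
	return 0;
}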
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 87bac27ec64b..0ebb539f3bd3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1614,12 +1614,15 @@ nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
 		/*
 		 * reconnecting state means transport disruption, which
 		 * can take a long time and even might fail permanently,
-		 * so we can't let incoming I/O be requeued forever.
-		 * fail it fast to allow upper layers a chance to
-		 * failover.
+		 * fail fast to give upper layers a chance to failover.
+		 * deleting state means that the ctrl will never accept
+		 * commands again, fail it permanently.
 		 */
-		if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
+		if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
+		    queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
+			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 			return BLK_STS_IOERR;
+		}
 		return BLK_STS_RESOURCE; /* try again later */
 	}
 }
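The pattern this hunk implements, condensed into a standalone helper; disposition_for_state is a hypothetical name for illustration, while the state, status, and blk_status_t identifiers come from the diff itself:

/*
 * Map controller state to a blk-mq disposition. RECONNECTING may
 * recover, but requeueing forever would hang upper layers; DELETING
 * never recovers. Both now fail the request immediately with an
 * abort status so multipath/failover can react, while any other
 * transient state keeps asking blk-mq to retry later.
 */
static blk_status_t disposition_for_state(enum nvme_ctrl_state state,
					  struct request *rq)
{
	if (state == NVME_CTRL_RECONNECTING || state == NVME_CTRL_DELETING) {
		nvme_req(rq)->status = NVME_SC_ABORT_REQ;
		return BLK_STS_IOERR;		/* fail fast, allow failover */
	}
	return BLK_STS_RESOURCE;		/* transient: requeue and retry */
}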