author		Linus Torvalds <torvalds@linux-foundation.org>	2015-11-25 14:08:35 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-25 14:08:35 -0500
commit		9b81d512a4a89dadfd3887d1b4443eeb2c20c573 (patch)
tree		f43501b249d8935bd228d00a328d7f7ef37dfcf5
parent		4cf193b4b2363bfed0b4e040e61f20d78192e2e0 (diff)
parent		dcd8376c369fa8fde8269e721b14f50475dd397b (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull more block layer fixes from Jens Axboe:
 "I wasn't going to send off a new pull before next week, but the blk
  flush fix from Jan from the other day introduced a regression. It's
  rare enough not to have hit during testing, since it requires both a
  device that rejects the first flush, and bad timing while it does
  that. But since someone did hit it, let's get the revert into
  4.4-rc3 so we don't have a released rc with that known issue.

  Apart from that revert, three other fixes:

   - From Christoph, a fix for a missing unmap in NVMe request
     preparation.

   - An NVMe fix from Nishanth that fixes data corruption on powerpc.

   - Also from Christoph, fix a list_del() attempt on blk-mq that
     didn't have a matching list_add() at timer start"

* 'for-linus' of git://git.kernel.dk/linux-block:
  Revert "blk-flush: Queue through IO scheduler when flush not required"
  block: fix blk_abort_request for blk-mq drivers
  nvme: add missing unmaps in nvme_queue_rq
  NVMe: default to 4k device page size
 block/blk-flush.c       |  2 +-
 block/blk-timeout.c     |  8 +++++---
 drivers/nvme/host/pci.c | 30 ++++++++++++++++++------------
 3 files changed, 24 insertions(+), 16 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index c81d56ec308f..9c423e53324a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -422,7 +422,7 @@ void blk_insert_flush(struct request *rq)
 		if (q->mq_ops) {
 			blk_mq_insert_request(rq, false, false, true);
 		} else
-			q->elevator->type->ops.elevator_add_req_fn(q, rq);
+			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
 	}
 
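The revert above takes the legacy (!mq_ops) flush path back to queueing the data request directly on q->queue_head rather than through the elevator's add_req hook, since routing it through the IO scheduler could stall when a device rejects the first flush at the wrong moment. A minimal sketch of the two insertion paths, in plain C with hypothetical stand-in types (not kernel code):

/*
 * Minimal sketch (plain C, hypothetical stand-in types) of the two
 * insertion paths. The revert takes the legacy flush path back to the
 * direct one.
 */
#include <stdio.h>

struct request { const char *name; struct request *next; };

struct queue {
	struct request *head;	/* stand-in for q->queue_head */
	void (*elevator_add_req)(struct queue *q, struct request *rq);
};

/* After the revert: append straight to the dispatch list. */
static void dispatch_direct(struct queue *q, struct request *rq)
{
	struct request **pp = &q->head;

	while (*pp)		/* walk to the tail, like list_add_tail() */
		pp = &(*pp)->next;
	rq->next = NULL;
	*pp = rq;
	printf("%s: appended to dispatch list\n", rq->name);
}

/* Before the revert: the IO scheduler decides when rq is issued. */
static void elevator_add(struct queue *q, struct request *rq)
{
	(void)q;
	printf("%s: handed to IO scheduler\n", rq->name);
}

int main(void)
{
	struct queue q = { NULL, elevator_add };
	struct request rq = { "data request", NULL };

	dispatch_direct(&q, &rq);	/* reverted behavior */
	return 0;
}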
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 246dfb16c3d9..aa40aa93381b 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -158,11 +158,13 @@ void blk_abort_request(struct request *req)
 {
 	if (blk_mark_rq_complete(req))
 		return;
-	blk_delete_timer(req);
-	if (req->q->mq_ops)
+
+	if (req->q->mq_ops) {
 		blk_mq_rq_timed_out(req, false);
-	else
+	} else {
+		blk_delete_timer(req);
 		blk_rq_timed_out(req);
+	}
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
 
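The bug here: blk_delete_timer() ends in a list_del() of the request's timeout_list entry, but that entry is only ever list_add()ed on the legacy path, so calling it for a blk-mq request deletes a node that was never inserted. The hunk moves the call under the !mq_ops branch. A standalone sketch (userspace C, simplified list helpers) of why an unmatched list_del() is fatal:

/*
 * Standalone sketch (userspace C, simplified list helpers) of the bug
 * class fixed above: list_del() on a node that was never list_add()ed
 * dereferences whatever garbage its link pointers hold.
 */
#include <stdio.h>

struct list_node { struct list_node *prev, *next; };

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del(struct list_node *n)
{
	/* If n was never added, n->prev and n->next are uninitialized. */
	n->next->prev = n->prev;
	n->prev->next = n->next;
}

int main(void)
{
	struct list_node head = { &head, &head };
	struct list_node legacy_rq, mq_rq;	/* mq_rq links never set */

	list_add_tail(&legacy_rq, &head);	/* legacy path arms the timer */
	list_del(&legacy_rq);			/* matching delete: safe */

	/*
	 * list_del(&mq_rq) would dereference uninitialized pointers --
	 * the blk-mq case the hunk avoids by skipping blk_delete_timer().
	 */
	(void)mq_rq;
	puts("only balanced add/del performed");
	return 0;
}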
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 930042fa2d69..f3b53af789ef 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			goto retry_cmd;
 		}
 		if (blk_integrity_rq(req)) {
-			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			sg_init_table(iod->meta_sg, 1);
 			if (blk_rq_map_integrity_sg(
-					req->q, req->bio, iod->meta_sg) != 1)
+					req->q, req->bio, iod->meta_sg) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			if (rq_data_dir(req))
 				nvme_dif_remap(req, nvme_dif_prep);
 
-			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 		}
 	}
 
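All three hunks restore the same error-unwind invariant: once dma_map_sg() has mapped the request's scatterlist, every later failure exit must call dma_unmap_sg() before jumping to error_cmd, or the mapping leaks. A compressed sketch of the idiom in plain C; map_sg(), unmap_sg() and the integrity check are hypothetical stand-ins that only log:

#include <stdbool.h>
#include <stdio.h>

static bool map_sg(void)   { puts("map sg");   return true; }
static void unmap_sg(void) { puts("unmap sg"); }

static int queue_rq(bool integrity_ok)
{
	if (!map_sg())
		return -1;		/* nothing mapped yet: plain return */

	if (!integrity_ok) {		/* any failure after the mapping... */
		unmap_sg();		/* ...must undo it first (the added calls) */
		goto error_cmd;
	}

	puts("submit command");		/* success: completion path unmaps later */
	return 0;

error_cmd:
	puts("fail the request");
	return -1;
}

int main(void)
{
	queue_rq(false);	/* error path: map and unmap stay balanced */
	return 0;
}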
@@ -1728,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u32 aqa;
 	u64 cap = lo_hi_readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
-	unsigned page_shift = PAGE_SHIFT;
+	/*
+	 * default to a 4K page size, with the intention to update this
+	 * path in the future to accomodate architectures with differing
+	 * kernel and IO page sizes.
+	 */
+	unsigned page_shift = 12;
 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
-	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
 
 	if (page_shift < dev_page_min) {
 		dev_err(dev->dev,
@@ -1739,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 			1 << page_shift);
 		return -ENODEV;
 	}
-	if (page_shift > dev_page_max) {
-		dev_info(dev->dev,
-			"Device maximum page size (%u) smaller than "
-			"host (%u); enabling work-around\n",
-			1 << dev_page_max, 1 << page_shift);
-		page_shift = dev_page_max;
-	}
 
 	dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
 		NVME_CAP_NSSRC(cap) : 0;
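Background for the two hunks above: the controller advertises its supported page-size range in the CAP register, where MPSMIN (bits 51:48) and MPSMAX (bits 55:52) encode powers of two above 4K, so the minimum device page size is 2^(12 + MPSMIN). The removed work-around clamped page_shift down to the device maximum while other parts of the PRP setup still assumed the kernel's PAGE_SHIFT, which is how the powerpc (64K page) corruption arose; pinning page_shift to 12 sidesteps the mismatch until differing kernel and IO page sizes are handled properly. A small decoding sketch in userspace C, with a made-up CAP value:

/*
 * Decoding sketch (userspace C, made-up CAP value). Bit positions
 * match the kernel's NVME_CAP_MPSMIN()/NVME_CAP_MPSMAX() macros.
 */
#include <stdint.h>
#include <stdio.h>

#define CAP_MPSMIN(cap)	((unsigned)(((cap) >> 48) & 0xf))
#define CAP_MPSMAX(cap)	((unsigned)(((cap) >> 52) & 0xf))

int main(void)
{
	uint64_t cap = 0x0020002028010fffULL;	/* made-up register value */
	unsigned page_shift = 12;		/* 4K, as the patch hardcodes */
	unsigned dev_page_min = CAP_MPSMIN(cap) + 12;

	if (page_shift < dev_page_min) {
		printf("device minimum page size (%u) larger than host (%u)\n",
		       1u << dev_page_min, 1u << page_shift);
		return 1;
	}
	printf("MPSMIN=%u MPSMAX=%u -> using %u byte pages\n",
	       CAP_MPSMIN(cap), CAP_MPSMAX(cap), 1u << page_shift);
	return 0;
}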