about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-06-30 13:47:46 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-06-30 13:47:46 -0400
commite6e5bec43c0d5dec97355ebf9f6c9bbf4d4c29d5 (patch)
tree60b8cafa37665a2465b2d8902fa1b97a73c68bfe /drivers
parent1904148a361a07fb2d7cba1261d1d2c2f33c8d2e (diff)
parent9544bc5347207a68eb308cc8aaaed6c3a687cabd (diff)
Merge tag 'for-linus-20180629' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe: "Small set of fixes for this series. Mostly just minor fixes, the only oddball in here is the sg change. The sg change came out of the stall fix for NVMe, where we added a mempool and limited us to a single page allocation. CONFIG_SG_DEBUG sort-of ruins that, since we'd need to account for that. That's actually a generic problem, since lots of drivers need to allocate SG lists. So this just removes support for CONFIG_SG_DEBUG, which I added back in 2007 and to my knowledge it was never useful. Anyway, outside of that, this pull contains: - clone of request with special payload fix (Bart) - drbd discard handling fix (Bart) - SATA blk-mq stall fix (me) - chunk size fix (Keith) - double free nvme rdma fix (Sagi)" * tag 'for-linus-20180629' of git://git.kernel.dk/linux-block: sg: remove ->sg_magic member drbd: Fix drbd_request_prepare() discard handling blk-mq: don't queue more if we get a busy return block: Fix cloning of requests with a special payload nvme-rdma: fix possible double free of controller async event buffer block: Fix transfer when chunk sectors exceeds max
Diffstat (limited to 'drivers')
-rw-r--r--drivers/block/drbd/drbd_req.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/nvme/host/rdma.c7
3 files changed, 7 insertions, 7 deletions
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index a47e4987ee46..d146fedc38bb 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
1244 _drbd_start_io_acct(device, req); 1244 _drbd_start_io_acct(device, req);
1245 1245
1246 /* process discards always from our submitter thread */ 1246 /* process discards always from our submitter thread */
1247 if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) || 1247 if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
1248 (bio_op(bio) & REQ_OP_DISCARD)) 1248 bio_op(bio) == REQ_OP_DISCARD)
1249 goto queue_for_submitter_thread; 1249 goto queue_for_submitter_thread;
1250 1250
1251 if (rw == WRITE && req->private_bio && req->i.size 1251 if (rw == WRITE && req->private_bio && req->i.size
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7014a96546f4..52f3b91d14fd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2245,9 +2245,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg)
2245 **/ 2245 **/
2246static inline struct scatterlist *__sg_next(struct scatterlist *sg) 2246static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2247{ 2247{
2248#ifdef CONFIG_DEBUG_SG
2249 BUG_ON(sg->sg_magic != SG_MAGIC);
2250#endif
2251 return sg_is_last(sg) ? NULL : ____sg_next(sg); 2248 return sg_is_last(sg) ? NULL : ____sg_next(sg);
2252} 2249}
2253 2250
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 9544625c0b7d..518c5b09038c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -732,8 +732,11 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
732 blk_cleanup_queue(ctrl->ctrl.admin_q); 732 blk_cleanup_queue(ctrl->ctrl.admin_q);
733 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); 733 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
734 } 734 }
735 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, 735 if (ctrl->async_event_sqe.data) {
736 sizeof(struct nvme_command), DMA_TO_DEVICE); 736 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
737 sizeof(struct nvme_command), DMA_TO_DEVICE);
738 ctrl->async_event_sqe.data = NULL;
739 }
737 nvme_rdma_free_queue(&ctrl->queues[0]); 740 nvme_rdma_free_queue(&ctrl->queues[0]);
738} 741}
739 742