summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-06-30 13:47:46 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-06-30 13:47:46 -0400
commite6e5bec43c0d5dec97355ebf9f6c9bbf4d4c29d5 (patch)
tree60b8cafa37665a2465b2d8902fa1b97a73c68bfe
parent1904148a361a07fb2d7cba1261d1d2c2f33c8d2e (diff)
parent9544bc5347207a68eb308cc8aaaed6c3a687cabd (diff)
Merge tag 'for-linus-20180629' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 "Small set of fixes for this series. Mostly just minor fixes, the only
  oddball in here is the sg change.

  The sg change came out of the stall fix for NVMe, where we added a
  mempool and limited us to a single page allocation. CONFIG_SG_DEBUG
  sort-of ruins that, since we'd need to account for that. That's
  actually a generic problem, since lots of drivers need to allocate SG
  lists. So this just removes support for CONFIG_SG_DEBUG, which I added
  back in 2007 and to my knowledge it was never useful.

  Anyway, outside of that, this pull contains:

   - clone of request with special payload fix (Bart)

   - drbd discard handling fix (Bart)

   - SATA blk-mq stall fix (me)

   - chunk size fix (Keith)

   - double free nvme rdma fix (Sagi)"

* tag 'for-linus-20180629' of git://git.kernel.dk/linux-block:
  sg: remove ->sg_magic member
  drbd: Fix drbd_request_prepare() discard handling
  blk-mq: don't queue more if we get a busy return
  block: Fix cloning of requests with a special payload
  nvme-rdma: fix possible double free of controller async event buffer
  block: Fix transfer when chunk sectors exceeds max
-rw-r--r--block/blk-core.c4
-rw-r--r--block/blk-mq.c12
-rw-r--r--drivers/block/drbd/drbd_req.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/nvme/host/rdma.c7
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/scatterlist.h18
-rw-r--r--lib/scatterlist.c6
-rw-r--r--tools/virtio/linux/scatterlist.h18
9 files changed, 25 insertions, 51 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index afd2596ea3d3..f84a9b7b6f5a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3473,6 +3473,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
3473 dst->cpu = src->cpu; 3473 dst->cpu = src->cpu;
3474 dst->__sector = blk_rq_pos(src); 3474 dst->__sector = blk_rq_pos(src);
3475 dst->__data_len = blk_rq_bytes(src); 3475 dst->__data_len = blk_rq_bytes(src);
3476 if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
3477 dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
3478 dst->special_vec = src->special_vec;
3479 }
3476 dst->nr_phys_segments = src->nr_phys_segments; 3480 dst->nr_phys_segments = src->nr_phys_segments;
3477 dst->ioprio = src->ioprio; 3481 dst->ioprio = src->ioprio;
3478 dst->extra_len = src->extra_len; 3482 dst->extra_len = src->extra_len;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b429d515b568..95919268564b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1075,6 +1075,9 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
1075 1075
1076#define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ 1076#define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
1077 1077
1078/*
1079 * Returns true if we did some work AND can potentially do more.
1080 */
1078bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, 1081bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
1079 bool got_budget) 1082 bool got_budget)
1080{ 1083{
@@ -1205,8 +1208,17 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
1205 blk_mq_run_hw_queue(hctx, true); 1208 blk_mq_run_hw_queue(hctx, true);
1206 else if (needs_restart && (ret == BLK_STS_RESOURCE)) 1209 else if (needs_restart && (ret == BLK_STS_RESOURCE))
1207 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); 1210 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1211
1212 return false;
1208 } 1213 }
1209 1214
1215 /*
1216 * If the host/device is unable to accept more work, inform the
1217 * caller of that.
1218 */
1219 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1220 return false;
1221
1210 return (queued + errors) != 0; 1222 return (queued + errors) != 0;
1211} 1223}
1212 1224
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index a47e4987ee46..d146fedc38bb 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
1244 _drbd_start_io_acct(device, req); 1244 _drbd_start_io_acct(device, req);
1245 1245
1246 /* process discards always from our submitter thread */ 1246 /* process discards always from our submitter thread */
1247 if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) || 1247 if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
1248 (bio_op(bio) & REQ_OP_DISCARD)) 1248 bio_op(bio) == REQ_OP_DISCARD)
1249 goto queue_for_submitter_thread; 1249 goto queue_for_submitter_thread;
1250 1250
1251 if (rw == WRITE && req->private_bio && req->i.size 1251 if (rw == WRITE && req->private_bio && req->i.size
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7014a96546f4..52f3b91d14fd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2245,9 +2245,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg)
2245 **/ 2245 **/
2246static inline struct scatterlist *__sg_next(struct scatterlist *sg) 2246static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2247{ 2247{
2248#ifdef CONFIG_DEBUG_SG
2249 BUG_ON(sg->sg_magic != SG_MAGIC);
2250#endif
2251 return sg_is_last(sg) ? NULL : ____sg_next(sg); 2248 return sg_is_last(sg) ? NULL : ____sg_next(sg);
2252} 2249}
2253 2250
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 9544625c0b7d..518c5b09038c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -732,8 +732,11 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
732 blk_cleanup_queue(ctrl->ctrl.admin_q); 732 blk_cleanup_queue(ctrl->ctrl.admin_q);
733 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); 733 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
734 } 734 }
735 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, 735 if (ctrl->async_event_sqe.data) {
736 sizeof(struct nvme_command), DMA_TO_DEVICE); 736 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
737 sizeof(struct nvme_command), DMA_TO_DEVICE);
738 ctrl->async_event_sqe.data = NULL;
739 }
737 nvme_rdma_free_queue(&ctrl->queues[0]); 740 nvme_rdma_free_queue(&ctrl->queues[0]);
738} 741}
739 742
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9154570edf29..79226ca8f80f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1119,8 +1119,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
1119 if (!q->limits.chunk_sectors) 1119 if (!q->limits.chunk_sectors)
1120 return q->limits.max_sectors; 1120 return q->limits.max_sectors;
1121 1121
1122 return q->limits.chunk_sectors - 1122 return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
1123 (offset & (q->limits.chunk_sectors - 1)); 1123 (offset & (q->limits.chunk_sectors - 1))));
1124} 1124}
1125 1125
1126static inline unsigned int blk_rq_get_max_sectors(struct request *rq, 1126static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 51f52020ad5f..093aa57120b0 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -9,9 +9,6 @@
9#include <asm/io.h> 9#include <asm/io.h>
10 10
11struct scatterlist { 11struct scatterlist {
12#ifdef CONFIG_DEBUG_SG
13 unsigned long sg_magic;
14#endif
15 unsigned long page_link; 12 unsigned long page_link;
16 unsigned int offset; 13 unsigned int offset;
17 unsigned int length; 14 unsigned int length;
@@ -64,7 +61,6 @@ struct sg_table {
64 * 61 *
65 */ 62 */
66 63
67#define SG_MAGIC 0x87654321
68#define SG_CHAIN 0x01UL 64#define SG_CHAIN 0x01UL
69#define SG_END 0x02UL 65#define SG_END 0x02UL
70 66
@@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
98 */ 94 */
99 BUG_ON((unsigned long) page & (SG_CHAIN | SG_END)); 95 BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
100#ifdef CONFIG_DEBUG_SG 96#ifdef CONFIG_DEBUG_SG
101 BUG_ON(sg->sg_magic != SG_MAGIC);
102 BUG_ON(sg_is_chain(sg)); 97 BUG_ON(sg_is_chain(sg));
103#endif 98#endif
104 sg->page_link = page_link | (unsigned long) page; 99 sg->page_link = page_link | (unsigned long) page;
@@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
129static inline struct page *sg_page(struct scatterlist *sg) 124static inline struct page *sg_page(struct scatterlist *sg)
130{ 125{
131#ifdef CONFIG_DEBUG_SG 126#ifdef CONFIG_DEBUG_SG
132 BUG_ON(sg->sg_magic != SG_MAGIC);
133 BUG_ON(sg_is_chain(sg)); 127 BUG_ON(sg_is_chain(sg));
134#endif 128#endif
135 return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END)); 129 return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
@@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
195 **/ 189 **/
196static inline void sg_mark_end(struct scatterlist *sg) 190static inline void sg_mark_end(struct scatterlist *sg)
197{ 191{
198#ifdef CONFIG_DEBUG_SG
199 BUG_ON(sg->sg_magic != SG_MAGIC);
200#endif
201 /* 192 /*
202 * Set termination bit, clear potential chain bit 193 * Set termination bit, clear potential chain bit
203 */ 194 */
@@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
215 **/ 206 **/
216static inline void sg_unmark_end(struct scatterlist *sg) 207static inline void sg_unmark_end(struct scatterlist *sg)
217{ 208{
218#ifdef CONFIG_DEBUG_SG
219 BUG_ON(sg->sg_magic != SG_MAGIC);
220#endif
221 sg->page_link &= ~SG_END; 209 sg->page_link &= ~SG_END;
222} 210}
223 211
@@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg)
260static inline void sg_init_marker(struct scatterlist *sgl, 248static inline void sg_init_marker(struct scatterlist *sgl,
261 unsigned int nents) 249 unsigned int nents)
262{ 250{
263#ifdef CONFIG_DEBUG_SG
264 unsigned int i;
265
266 for (i = 0; i < nents; i++)
267 sgl[i].sg_magic = SG_MAGIC;
268#endif
269 sg_mark_end(&sgl[nents - 1]); 251 sg_mark_end(&sgl[nents - 1]);
270} 252}
271 253
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 1642fd507a96..7c6096a71704 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -24,9 +24,6 @@
24 **/ 24 **/
25struct scatterlist *sg_next(struct scatterlist *sg) 25struct scatterlist *sg_next(struct scatterlist *sg)
26{ 26{
27#ifdef CONFIG_DEBUG_SG
28 BUG_ON(sg->sg_magic != SG_MAGIC);
29#endif
30 if (sg_is_last(sg)) 27 if (sg_is_last(sg))
31 return NULL; 28 return NULL;
32 29
@@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
111 for_each_sg(sgl, sg, nents, i) 108 for_each_sg(sgl, sg, nents, i)
112 ret = sg; 109 ret = sg;
113 110
114#ifdef CONFIG_DEBUG_SG
115 BUG_ON(sgl[0].sg_magic != SG_MAGIC);
116 BUG_ON(!sg_is_last(ret)); 111 BUG_ON(!sg_is_last(ret));
117#endif
118 return ret; 112 return ret;
119} 113}
120EXPORT_SYMBOL(sg_last); 114EXPORT_SYMBOL(sg_last);
diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h
index 9a45f90e2d08..369ee308b668 100644
--- a/tools/virtio/linux/scatterlist.h
+++ b/tools/virtio/linux/scatterlist.h
@@ -36,7 +36,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
36 */ 36 */
37 BUG_ON((unsigned long) page & 0x03); 37 BUG_ON((unsigned long) page & 0x03);
38#ifdef CONFIG_DEBUG_SG 38#ifdef CONFIG_DEBUG_SG
39 BUG_ON(sg->sg_magic != SG_MAGIC);
40 BUG_ON(sg_is_chain(sg)); 39 BUG_ON(sg_is_chain(sg));
41#endif 40#endif
42 sg->page_link = page_link | (unsigned long) page; 41 sg->page_link = page_link | (unsigned long) page;
@@ -67,7 +66,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
67static inline struct page *sg_page(struct scatterlist *sg) 66static inline struct page *sg_page(struct scatterlist *sg)
68{ 67{
69#ifdef CONFIG_DEBUG_SG 68#ifdef CONFIG_DEBUG_SG
70 BUG_ON(sg->sg_magic != SG_MAGIC);
71 BUG_ON(sg_is_chain(sg)); 69 BUG_ON(sg_is_chain(sg));
72#endif 70#endif
73 return (struct page *)((sg)->page_link & ~0x3); 71 return (struct page *)((sg)->page_link & ~0x3);
@@ -116,9 +114,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
116 **/ 114 **/
117static inline void sg_mark_end(struct scatterlist *sg) 115static inline void sg_mark_end(struct scatterlist *sg)
118{ 116{
119#ifdef CONFIG_DEBUG_SG
120 BUG_ON(sg->sg_magic != SG_MAGIC);
121#endif
122 /* 117 /*
123 * Set termination bit, clear potential chain bit 118 * Set termination bit, clear potential chain bit
124 */ 119 */
@@ -136,17 +131,11 @@ static inline void sg_mark_end(struct scatterlist *sg)
136 **/ 131 **/
137static inline void sg_unmark_end(struct scatterlist *sg) 132static inline void sg_unmark_end(struct scatterlist *sg)
138{ 133{
139#ifdef CONFIG_DEBUG_SG
140 BUG_ON(sg->sg_magic != SG_MAGIC);
141#endif
142 sg->page_link &= ~0x02; 134 sg->page_link &= ~0x02;
143} 135}
144 136
145static inline struct scatterlist *sg_next(struct scatterlist *sg) 137static inline struct scatterlist *sg_next(struct scatterlist *sg)
146{ 138{
147#ifdef CONFIG_DEBUG_SG
148 BUG_ON(sg->sg_magic != SG_MAGIC);
149#endif
150 if (sg_is_last(sg)) 139 if (sg_is_last(sg))
151 return NULL; 140 return NULL;
152 141
@@ -160,13 +149,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
160static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents) 149static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
161{ 150{
162 memset(sgl, 0, sizeof(*sgl) * nents); 151 memset(sgl, 0, sizeof(*sgl) * nents);
163#ifdef CONFIG_DEBUG_SG
164 {
165 unsigned int i;
166 for (i = 0; i < nents; i++)
167 sgl[i].sg_magic = SG_MAGIC;
168 }
169#endif
170 sg_mark_end(&sgl[nents - 1]); 152 sg_mark_end(&sgl[nents - 1]);
171} 153}
172 154