author     Bart Van Assche <bart.vanassche@sandisk.com>    2016-07-21 16:03:09 -0400
committer  Doug Ledford <dledford@redhat.com>              2016-08-02 12:02:41 -0400
commit     eaa74ec7329a48a4b724d8de440b3a2cbaabf7c8
tree       e8d87bbfa7811c8c7ce7eb601fd522177c80a831
parent     523d939ef98fd712632d93a5a2b588e477a7565e
IB/core: Make rdma_rw_ctx_init() initialize all used fields
Some but not all callers of rdma_rw_ctx_init() zero-initialize
struct rdma_rw_ctx. Hence make rdma_rw_ctx_init() initialize all
work request fields that will be read by ib_post_send().
Fixes: a060b5629ab0 ("IB/core: generic RDMA READ/WRITE API")
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Laurence Oberman <loberman@redhat.com>
Cc: Parav Pandit <pandit.parav@gmail.com>
Cc: Nicholas Bellinger <nab@linux-iscsi.org>
Cc: <stable@vger.kernel.org> #v4.7+
Signed-off-by: Doug Ledford <dledford@redhat.com>
 drivers/infiniband/core/rw.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
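
The failure mode being addressed can be illustrated with a hypothetical caller like the sketch below: a struct rdma_rw_ctx embedded in a request structure obtained from a non-zeroing allocator, so any work-request field the init path does not write explicitly keeps whatever bytes were already in memory and may later be read by ib_post_send(). The names my_io_req and my_issue_read are made up for illustration; only the rdma_rw_ctx_init() call itself reflects the real API.

#include <rdma/rw.h>

struct my_io_req {			/* hypothetical caller structure */
	struct rdma_rw_ctx rw;		/* not zeroed by the allocator */
	/* ... other per-request state ... */
};

/* Issue an RDMA READ of a local scatterlist from a remote buffer. */
static int my_issue_read(struct my_io_req *req, struct ib_qp *qp, u8 port_num,
			 struct scatterlist *sg, u32 sg_cnt,
			 u64 remote_addr, u32 rkey)
{
	int ret;

	/*
	 * Before this patch, rdma_rw_ctx_init() relied in places on the
	 * context memory already being zeroed instead of writing fields
	 * such as wr.next and wr.num_sge explicitly, so stale contents
	 * of req->rw could later be interpreted by ib_post_send().
	 */
	ret = rdma_rw_ctx_init(&req->rw, qp, port_num, sg, sg_cnt, 0,
			       remote_addr, rkey, DMA_FROM_DEVICE);
	return ret < 0 ? ret : 0;
}

The patch closes this by making rdma_rw_ctx_init() set every work-request field it hands to ib_post_send(), rather than depending on each caller to zero the context first.
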
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 1eb9b1294a63..1ad2baaa6c8c 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -71,6 +71,7 @@ static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
 	return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
 }
 
+/* Caller must have zero-initialized *reg. */
 static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
 		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
 		u32 sg_cnt, u32 offset)
@@ -114,6 +115,7 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
+	struct rdma_rw_reg_ctx *prev = NULL;
 	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
 	int i, j, ret = 0, count = 0;
 
@@ -125,7 +127,6 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 	}
 
 	for (i = 0; i < ctx->nr_ops; i++) {
-		struct rdma_rw_reg_ctx *prev = i ? &ctx->reg[i - 1] : NULL;
 		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
 		u32 nents = min(sg_cnt, pages_per_mr);
 
@@ -162,9 +163,13 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 		sg_cnt -= nents;
 		for (j = 0; j < nents; j++)
 			sg = sg_next(sg);
+		prev = reg;
 		offset = 0;
 	}
 
+	if (prev)
+		prev->wr.wr.next = NULL;
+
 	ctx->type = RDMA_RW_MR;
 	return count;
 
@@ -205,11 +210,10 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
 		rdma_wr->remote_addr = remote_addr + total_len;
 		rdma_wr->rkey = rkey;
+		rdma_wr->wr.num_sge = nr_sge;
 		rdma_wr->wr.sg_list = sge;
 
 		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
-			rdma_wr->wr.num_sge++;
-
 			sge->addr = ib_sg_dma_address(dev, sg) + offset;
 			sge->length = ib_sg_dma_len(dev, sg) - offset;
 			sge->lkey = qp->pd->local_dma_lkey;
@@ -220,8 +224,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 			offset = 0;
 		}
 
-		if (i + 1 < ctx->nr_ops)
-			rdma_wr->wr.next = &ctx->map.wrs[i + 1].wr;
+		rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
+			&ctx->map.wrs[i + 1].wr : NULL;
 	}
 
 	ctx->type = RDMA_RW_MULTI_WR;
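
Why a stale wr.next matters: ib_post_send() accepts a singly linked chain of struct ib_send_wr terminated by a NULL next pointer, and the provider walks that chain when posting. The sketch below is not provider code (example_post_walk is a made-up name); it only shows the walk pattern that would follow garbage pointers if the final work request built by rdma_rw_ctx_init() kept a stale next value.

#include <rdma/ib_verbs.h>

/*
 * Minimal illustration of how a chain of posted send work requests is
 * consumed: iterate until a NULL ->next terminates the list.  An
 * unterminated chain makes this loop dereference stale memory.
 */
static int example_post_walk(const struct ib_send_wr *wr)
{
	int posted = 0;

	for (; wr; wr = wr->next)	/* each WR would be handed to hardware here */
		posted++;
	return posted;
}

With the patch applied, the MR path tracks the last registration context in prev and clears prev->wr.wr.next after the loop, and the multi-WR path assigns wr.next unconditionally (NULL on the final work request) and sets wr.num_sge explicitly instead of incrementing it, so none of the fields read by ib_post_send() depend on the prior contents of the context memory.
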