about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRaju Rangoju <rajur@chelsio.com>2019-01-03 12:35:31 -0500
committerJens Axboe <axboe@kernel.dk>2019-01-23 19:16:59 -0500
commit5cbab6303b4791a3e6713dfe2c5fda6a867f9adc (patch)
tree1585daaacf96e9b150e616cff6b2a1d32aaa1fc8
parentb1064d3e337b4d0b67d641b5f771187d8f1f027d (diff)
nvmet-rdma: fix null dereference under heavy load
Under heavy load, if we don't have any pre-allocated rsps left, we dynamically allocate a rsp, but we are not actually allocating memory for nvme_completion (rsp->req.rsp). In such a case, accessing pointer fields (req->rsp->status) in nvmet_req_init() will result in a crash.

To fix this, allocate the memory for nvme_completion by calling nvmet_rdma_alloc_rsp().

Fixes: 8407879c ("nvmet-rdma: fix possible bogus dereference under heavy load")
Cc: <stable@vger.kernel.org>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Raju Rangoju <rajur@chelsio.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- drivers/nvme/target/rdma.c | 15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index a8d23eb80192..a884e3a0e8af 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_rsp *r);
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_rsp *r);
 
 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
 
@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
 	if (unlikely(!rsp)) {
-		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		int ret;
+
+		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 		if (unlikely(!rsp))
 			return NULL;
+		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+		if (unlikely(ret)) {
+			kfree(rsp);
+			return NULL;
+		}
+
 		rsp->allocated = true;
 	}
 
@@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 	unsigned long flags;
 
 	if (unlikely(rsp->allocated)) {
+		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
 		kfree(rsp);
 		return;
 	}