commit     e3524b269e451cff68b19f32b15448933a53a4f4 (patch)
tree       4ced0a83b6f8adcea83e478bfba004780ba71901
parent     30a366a9dabd05a0d218288b7d732649886b6a53 (diff)
author     Bryan Tan <bryantan@vmware.com>      2017-12-20 12:51:40 -0500
committer  Jason Gunthorpe <jgg@mellanox.com>   2017-12-21 18:06:07 -0500
RDMA/vmw_pvrdma: Avoid use after free due to QP/CQ/SRQ destroy
The use of wait queues in vmw_pvrdma for handling concurrent
access to a resource leaves a race condition which can cause a use
after free bug.
Fix this by using the pattern from other drivers, complete() protected by
dec_and_test to ensure complete() is called only once.
Fixes: 29c8d9eba550 ("IB: Add vmw_pvrdma driver")
Signed-off-by: Bryan Tan <bryantan@vmware.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h      |  6 +++---
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c   |  7 ++++---
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 17 +++++++----------
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c   |  7 ++++---
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c  |  7 ++++---
 5 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 63bc2efc34eb..4f7bd3b6a315 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -94,7 +94,7 @@ struct pvrdma_cq {
 	u32 cq_handle;
 	bool is_kernel;
 	atomic_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_id_table {
@@ -175,7 +175,7 @@ struct pvrdma_srq {
 	u32 srq_handle;
 	int npages;
 	refcount_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_qp {
@@ -197,7 +197,7 @@ struct pvrdma_qp {
 	bool is_kernel;
 	struct mutex mutex; /* QP state mutex. */
 	atomic_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_dev {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 3562c0c30492..e529622cefad 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -179,7 +179,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
 
 	atomic_set(&cq->refcnt, 1);
-	init_waitqueue_head(&cq->wait);
+	init_completion(&cq->free);
 	spin_lock_init(&cq->cq_lock);
 
 	memset(cmd, 0, sizeof(*cmd));
@@ -230,8 +230,9 @@ err_cq:
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-	atomic_dec(&cq->refcnt);
-	wait_event(cq->wait, !atomic_read(&cq->refcnt));
+	if (atomic_dec_and_test(&cq->refcnt))
+		complete(&cq->free);
+	wait_for_completion(&cq->free);
 
 	if (!cq->is_kernel)
 		ib_umem_release(cq->umem);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 1f4e18717a00..e92681878c93 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
 		ibqp->event_handler(&e, ibqp->qp_context);
 	}
 	if (qp) {
-		atomic_dec(&qp->refcnt);
-		if (atomic_read(&qp->refcnt) == 0)
-			wake_up(&qp->wait);
+		if (atomic_dec_and_test(&qp->refcnt))
+			complete(&qp->free);
 	}
 }
 
@@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
 		ibcq->event_handler(&e, ibcq->cq_context);
 	}
 	if (cq) {
-		atomic_dec(&cq->refcnt);
-		if (atomic_read(&cq->refcnt) == 0)
-			wake_up(&cq->wait);
+		if (atomic_dec_and_test(&cq->refcnt))
+			complete(&cq->free);
 	}
 }
 
@@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
 	}
 	if (srq) {
 		if (refcount_dec_and_test(&srq->refcnt))
-			wake_up(&srq->wait);
+			complete(&srq->free);
 	}
 }
 
@@ -539,9 +537,8 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
 		if (cq && cq->ibcq.comp_handler)
 			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 		if (cq) {
-			atomic_dec(&cq->refcnt);
-			if (atomic_read(&cq->refcnt))
-				wake_up(&cq->wait);
+			if (atomic_dec_and_test(&cq->refcnt))
+				complete(&cq->free);
 		}
 		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
 	}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index dceebc623d96..4059308e1454 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -246,7 +246,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 	spin_lock_init(&qp->rq.lock);
 	mutex_init(&qp->mutex);
 	atomic_set(&qp->refcnt, 1);
-	init_waitqueue_head(&qp->wait);
+	init_completion(&qp->free);
 
 	qp->state = IB_QPS_RESET;
 
@@ -428,8 +428,9 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
 
 	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-	atomic_dec(&qp->refcnt);
-	wait_event(qp->wait, !atomic_read(&qp->refcnt));
+	if (atomic_dec_and_test(&qp->refcnt))
+		complete(&qp->free);
+	wait_for_completion(&qp->free);
 
 	if (!qp->is_kernel) {
 		if (qp->rumem)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index a2b1a3c115f2..5acebb1ef631 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 
 	spin_lock_init(&srq->lock);
 	refcount_set(&srq->refcnt, 1);
-	init_waitqueue_head(&srq->wait);
+	init_completion(&srq->free);
 
 	dev_dbg(&dev->pdev->dev,
 		"create shared receive queue from user space\n");
@@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
 	dev->srq_tbl[srq->srq_handle] = NULL;
 	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
-	if (!refcount_dec_and_test(&srq->refcnt))
-		wait_event(srq->wait, !refcount_read(&srq->refcnt));
+	if (refcount_dec_and_test(&srq->refcnt))
+		complete(&srq->free);
+	wait_for_completion(&srq->free);
 
 	/* There is no support for kernel clients, so this is safe. */
 	ib_umem_release(srq->umem);