| author | Jason Gunthorpe <jgg@mellanox.com> | 2019-10-01 11:38:19 -0400 |
|---|---|---|
| committer | Jason Gunthorpe <jgg@mellanox.com> | 2019-10-04 14:54:22 -0400 |
| commit | aa116b810ac9077a263ed8679fb4d595f180e0eb | |
| tree | 0e00e8d84c48e8ce53d2035207be218238e1ebf1 | |
| parent | 9dc775e7f5508f848661bbfb2e15683affb85f24 | |
RDMA/mlx5: Order num_pending_prefetch properly with synchronize_srcu
During destroy, setting live = 0 and then calling synchronize_srcu() prevents
num_pending_prefetch from incrementing, and also ensures that all work holding
that count is already queued on the WQ. Testing the counter before the
synchronize_srcu() causes races of the form:
```
    CPU0                                  CPU1
                                 dereg_mr()
  mlx5_ib_advise_mr_prefetch()
   srcu_read_lock()
    num_pending_prefetch_inc()
      if (!live)
                                   live = 0
                                   atomic_read() == 0
                                 // skip flush_workqueue()
        atomic_inc()
        queue_work();
   srcu_read_unlock()
                                 WARN_ON(atomic_read())  // Fails
```
Swap the order so that the synchronize_srcu() prevents this.
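
For context, here is a minimal sketch of the two sides of this ordering in plain kernel C. The reader side only approximates mlx5_ib_advise_mr_prefetch()/num_pending_prefetch_inc(); prefetch_try_get() and mr_teardown_order() are hypothetical names and the bodies are abbreviated, so treat this as an illustration of the pattern rather than the driver's exact code:

```c
/*
 * Illustrative sketch only -- approximates the mlx5 ODP prefetch/destroy
 * interaction. prefetch_try_get() and mr_teardown_order() are made-up
 * names; struct mlx5_ib_dev / struct mlx5_ib_mr come from the driver
 * headers (mlx5_ib.h).
 */
#include <linux/srcu.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

/* Reader side: roughly what the prefetch path does under the device SRCU. */
static bool prefetch_try_get(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			     struct work_struct *work)
{
	bool queued = false;
	int idx;

	idx = srcu_read_lock(&dev->mr_srcu);
	if (mr->live) {
		/* The count is taken inside the SRCU read section ... */
		atomic_inc(&mr->num_pending_prefetch);
		/* ... and the work is queued before the section ends. */
		queue_work(system_unbound_wq, work);
		queued = true;
	}
	srcu_read_unlock(&dev->mr_srcu, idx);
	return queued;
}

/* Destroy side, in the order this patch establishes. */
static void mr_teardown_order(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	mr->live = 0;

	/*
	 * Once synchronize_srcu() returns, no reader can still sit between
	 * its live check and its atomic_inc(): each reader either saw
	 * live == 0 and bailed, or has already incremented the count and
	 * queued its work.
	 */
	synchronize_srcu(&dev->mr_srcu);

	/* The count can now only go down, so draining the WQ is enough. */
	if (atomic_read(&mr->num_pending_prefetch))
		flush_workqueue(system_unbound_wq);
	WARN_ON(atomic_read(&mr->num_pending_prefetch));
}
```

The point of the swap is that the SRCU grace period pins down every increment before the destroy path looks at the counter, so flush_workqueue() followed by WARN_ON() becomes a reliable drain-and-check instead of a race.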
Fixes: a6bc3875f176 ("IB/mlx5: Protect against prefetch of invalid MR")
Link: https://lore.kernel.org/r/20191001153821.23621-5-jgg@ziepe.ca
Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
-rw-r--r-- | drivers/infiniband/hw/mlx5/mr.c | 5 |
1 file changed, 3 insertions(+), 2 deletions(-)
```diff
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index e7f840f306e4..0ee8fa01177f 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1609,13 +1609,14 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		 */
 		mr->live = 0;
 
+		/* Wait for all running page-fault handlers to finish. */
+		synchronize_srcu(&dev->mr_srcu);
+
 		/* dequeue pending prefetch requests for the mr */
 		if (atomic_read(&mr->num_pending_prefetch))
 			flush_workqueue(system_unbound_wq);
 		WARN_ON(atomic_read(&mr->num_pending_prefetch));
 
-		/* Wait for all running page-fault handlers to finish. */
-		synchronize_srcu(&dev->mr_srcu);
 		/* Destroy all page mappings */
 		if (!umem_odp->is_implicit_odp)
 			mlx5_ib_invalidate_range(umem_odp,
```