author	Moni Shoua <monis@mellanox.com>	2019-03-19 05:24:36 -0400
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-03-27 14:27:11 -0400
commit	1abe186ed8a6593069bc122da55fc684383fdc1c (patch)
tree	e72d2040a57a70dcb5285b5395b549047599f268
parent	d0294344470e6b52d097aa7369173f32d11f2f52 (diff)
IB/mlx5: Reset access mask when looping inside page fault handler
If the page-fault handler spans multiple MRs, the access mask needs to be
reset before handling each MR; otherwise write access will be granted to
mapped pages instead of read-only access.

Cc: <stable@vger.kernel.org> # 3.19
Fixes: 7bdf65d411c1 ("IB/mlx5: Handle page faults")
Reported-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
-rw-r--r--	drivers/infiniband/hw/mlx5/odp.c	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index c20bfc41ecf1..0aa10ebda5d9 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
 	bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
 	bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
-	u64 access_mask = ODP_READ_ALLOWED_BIT;
+	u64 access_mask;
 	u64 start_idx, page_mask;
 	struct ib_umem_odp *odp;
 	size_t size;
@@ -607,6 +607,7 @@ next_mr:
 	page_shift = mr->umem->page_shift;
 	page_mask = ~(BIT(page_shift) - 1);
 	start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+	access_mask = ODP_READ_ALLOWED_BIT;
 
 	if (prefetch && !downgrade && !mr->umem->writable) {
 		/* prefetch with write-access must
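For reference, below is a minimal stand-alone C sketch of the pattern the patch
corrects. It is not the kernel code: the region struct, the region array, and
the mask bit values are made up purely for illustration. It shows how a
permission mask initialized only once before a loop keeps the write bit OR'ed
in by an earlier writable region, so a later read-only region would appear
writable; re-initializing the mask at the top of each iteration, as the second
hunk above does after the next_mr label, restores per-region permissions.

/*
 * Minimal sketch, not the kernel code: hypothetical region array and
 * mask bits chosen only to show the pitfall.  When the mask is set
 * once before the loop, WRITE_ALLOWED OR'ed in for a writable region
 * leaks into the following read-only region.
 */
#include <stdio.h>

#define READ_ALLOWED  0x1UL
#define WRITE_ALLOWED 0x2UL

struct region {
	const char *name;
	int writable;			/* does this region permit writes? */
};

int main(void)
{
	struct region regions[] = {
		{ "mr0", 1 },		/* writable region */
		{ "mr1", 0 },		/* read-only region */
	};
	unsigned long access_mask = READ_ALLOWED;	/* buggy: set only once */

	for (int i = 0; i < 2; i++) {
		/* fixed version: reset here, i.e. access_mask = READ_ALLOWED; */
		if (regions[i].writable)
			access_mask |= WRITE_ALLOWED;

		/* without the per-iteration reset, mr1 prints 0x3 (writable) */
		printf("%s mapped with access mask 0x%lx\n",
		       regions[i].name, access_mask);
	}
	return 0;
}

With the per-iteration reset in place, mr1 is mapped with 0x1 (read-only),
which mirrors what the patch achieves by moving the access_mask
initialization inside the next_mr loop.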