Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
 drivers/infiniband/hw/mlx5/mr.c | 33 +++++++++++++++++++--------------
 1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 2de4f4448f8a..49fc3ca735a4 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -48,13 +48,6 @@ enum {
 	MLX5_UMR_ALIGN	= 2048
 };
 
-static __be64 *mr_align(__be64 *ptr, int align)
-{
-	unsigned long mask = align - 1;
-
-	return (__be64 *)(((unsigned long)ptr + mask) & ~mask);
-}
-
 static int order2idx(struct mlx5_ib_dev *dev, int order)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
@@ -669,7 +662,7 @@ static int get_octo_len(u64 addr, u64 len, int page_size)
 
 static int use_umr(int order)
 {
-	return order <= 17;
+	return order <= MLX5_MAX_UMR_SHIFT;
 }
 
 static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
@@ -747,8 +740,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	struct ib_send_wr wr, *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
-	int size = sizeof(u64) * npages;
+	int size;
 	__be64 *mr_pas;
+	__be64 *pas;
 	dma_addr_t dma;
 	int err = 0;
 	int i;
@@ -768,17 +762,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	if (!mr)
 		return ERR_PTR(-EAGAIN);
 
+	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+	 * To avoid copying garbage after the pas array, we allocate
+	 * a little more. */
+	size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
 	mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
 	if (!mr_pas) {
 		err = -ENOMEM;
 		goto free_mr;
 	}
 
-	mlx5_ib_populate_pas(dev, umem, page_shift,
-			     mr_align(mr_pas, MLX5_UMR_ALIGN), 1);
+	pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
+	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
+	/* Clear padding after the actual pages. */
+	memset(pas + npages, 0, size - npages * sizeof(u64));
 
-	dma = dma_map_single(ddev, mr_align(mr_pas, MLX5_UMR_ALIGN), size,
-			     DMA_TO_DEVICE);
+	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
 	if (dma_mapping_error(ddev, dma)) {
 		err = -ENOMEM;
 		goto free_pas;
@@ -833,6 +832,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
 	struct mlx5_ib_mr *mr;
 	int inlen;
 	int err;
+	bool pg_cap = !!(dev->mdev->caps.gen.flags &
+			 MLX5_DEV_CAP_FLAG_ON_DMND_PG);
 
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
@@ -844,8 +845,12 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
 		err = -ENOMEM;
 		goto err_1;
 	}
-	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0);
+	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
+			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
 
+	/* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
+	 * in the page list submitted with the command. */
+	in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
 	in->seg.flags = convert_access(access_flags) |
 		MLX5_ACCESS_MODE_MTT;
 	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
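
Note: the patch drops the driver's open-coded mr_align() helper in favour of the kernel's PTR_ALIGN() macro, rounds the MTT buffer size up to MLX5_UMR_MTT_ALIGNMENT so UMR never copies uninitialized memory, and zeroes the padding entries past the last real page. The following is a minimal standalone C sketch of that over-allocate / align / zero-pad pattern; it is not the kernel code, and UMR_ALIGN, MTT_ALIGNMENT and alloc_aligned_pas() are illustrative stand-ins for the mlx5 constants and for the logic inside reg_umr().

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	#define UMR_ALIGN      2048	/* stand-in for MLX5_UMR_ALIGN */
	#define MTT_ALIGNMENT    64	/* stand-in for MLX5_UMR_MTT_ALIGNMENT */

	/* Round x up to a power-of-two boundary, like the kernel's ALIGN(). */
	#define ALIGN_UP(x, a)  (((uintptr_t)(x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

	/*
	 * Allocate npages 64-bit page-address entries such that the usable
	 * array starts on a UMR_ALIGN boundary and its length is a multiple
	 * of MTT_ALIGNMENT, then clear the padding entries after the last
	 * real page so the hardware never sees garbage.
	 */
	static uint64_t *alloc_aligned_pas(size_t npages, void **raw_out)
	{
		size_t size = ALIGN_UP(npages * sizeof(uint64_t), MTT_ALIGNMENT);
		void *raw = malloc(size + UMR_ALIGN - 1);	/* over-allocate for alignment */
		uint64_t *pas;

		if (!raw)
			return NULL;
		pas = (uint64_t *)ALIGN_UP(raw, UMR_ALIGN);	/* aligned view into raw */
		memset(pas + npages, 0, size - npages * sizeof(uint64_t));
		*raw_out = raw;		/* caller frees the raw pointer, not pas */
		return pas;
	}
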