Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
 drivers/infiniband/hw/mlx5/mr.c | 45 +++++++++++++++++++++++++++++++--------------
 1 file changed, 31 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 38b06267798e..922ac85b7198 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -52,6 +52,8 @@ static __be64 mlx5_ib_update_mtt_emergency_buffer[
 static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
 #endif
 
+static int clean_mr(struct mlx5_ib_mr *mr);
+
 static int order2idx(struct mlx5_ib_dev *dev, int order)
 {
         struct mlx5_mr_cache *cache = &dev->cache;
@@ -1049,6 +1051,10 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                         mlx5_ib_dbg(dev, "cache empty for order %d", order);
                         mr = NULL;
                 }
+        } else if (access_flags & IB_ACCESS_ON_DEMAND) {
+                err = -EINVAL;
+                pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+                goto error;
         }
 
         if (!mr)
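This hunk makes the unsupported case fail fast: an on-demand-paging registration too large for the UMR cache path (above 512MB) now returns -EINVAL on Connect-IB instead of falling through to the regular registration path. A minimal userspace sketch of a registration that would hit this check, written against the libibverbs API; the function name and buffer setup are illustrative, not part of this commit:

#include <infiniband/verbs.h>
#include <stdio.h>
#include <stdlib.h>

int try_large_odp_mr(struct ibv_pd *pd)
{
        size_t len = 1UL << 30;        /* 1GB, above the 512MB limit */
        void *buf;
        struct ibv_mr *mr;

        if (posix_memalign(&buf, 4096, len))
                return -1;

        mr = ibv_reg_mr(pd, buf, len,
                        IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_ON_DEMAND);
        if (!mr) {
                perror("ibv_reg_mr");  /* expect EINVAL after this patch */
                free(buf);
                return -1;
        }
        ibv_dereg_mr(mr);
        free(buf);
        return 0;
}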
@@ -1064,9 +1070,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
         mr->umem = umem;
         mr->npages = npages;
-        spin_lock(&dev->mr_lock);
-        dev->mdev->priv.reg_pages += npages;
-        spin_unlock(&dev->mr_lock);
+        atomic_add(npages, &dev->mdev->priv.reg_pages);
         mr->ibmr.lkey = mr->mmr.key;
         mr->ibmr.rkey = mr->mmr.key;
 
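Here the per-device reg_pages counter moves from a spinlock-protected int to atomic arithmetic, dropping dev->mr_lock from the registration path (the matching atomic_t type change to priv.reg_pages is assumed to land elsewhere in the same series). A self-contained sketch of the before/after pattern, with illustrative type names:

#include <linux/atomic.h>
#include <linux/spinlock.h>

/* Before: a plain int guarded by a spinlock. */
struct stats_locked {
        spinlock_t lock;
        int reg_pages;
};

static void add_pages_locked(struct stats_locked *s, int npages)
{
        spin_lock(&s->lock);
        s->reg_pages += npages;
        spin_unlock(&s->lock);
}

/* After: an atomic_t needs no external lock for add/sub. */
struct stats_atomic {
        atomic_t reg_pages;
};

static void add_pages_atomic(struct stats_atomic *s, int npages)
{
        atomic_add(npages, &s->reg_pages);
}

For a plain statistics counter with no compound invariant spanning several fields, atomic_add()/atomic_sub() gives the same correctness with less locking overhead.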
@@ -1110,12 +1114,9 @@ error:
         return err;
 }
 
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+static int clean_mr(struct mlx5_ib_mr *mr)
 {
-        struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
-        struct mlx5_ib_mr *mr = to_mmr(ibmr);
-        struct ib_umem *umem = mr->umem;
-        int npages = mr->npages;
+        struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
         int umred = mr->umred;
         int err;
 
@@ -1135,16 +1136,32 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
                 free_cached_mr(dev, mr);
         }
 
+        if (!umred)
+                kfree(mr);
+
+        return 0;
+}
+
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+{
+        struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+        struct mlx5_ib_mr *mr = to_mmr(ibmr);
+        int npages = mr->npages;
+        struct ib_umem *umem = mr->umem;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+        if (umem)
+                /* Wait for all running page-fault handlers to finish. */
+                synchronize_srcu(&dev->mr_srcu);
+#endif
+
+        clean_mr(mr);
+
         if (umem) {
                 ib_umem_release(umem);
-                spin_lock(&dev->mr_lock);
-                dev->mdev->priv.reg_pages -= npages;
-                spin_unlock(&dev->mr_lock);
+                atomic_sub(npages, &dev->mdev->priv.reg_pages);
         }
 
-        if (!umred)
-                kfree(mr);
-
         return 0;
 }
 
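Deregistration is now split in two: clean_mr() tears down the MR itself, while mlx5_ib_dereg_mr() first waits out any page-fault handler still running under SRCU, then releases the umem and the page accounting. A sketch of the read-side pairing that synchronize_srcu() relies on; the handler function is hypothetical, only dev->mr_srcu appears in this patch:

#include <linux/srcu.h>

/* Assumed reader side: page-fault handlers hold an SRCU read-side
 * critical section around their use of the MR, so synchronize_srcu()
 * in mlx5_ib_dereg_mr() cannot return while any handler still
 * dereferences the MR being torn down.
 */
static void handle_mr_page_fault(struct mlx5_ib_dev *dev, u32 key)
{
        int idx;

        idx = srcu_read_lock(&dev->mr_srcu);
        /* ... look up the MR by key and resolve the fault ... */
        srcu_read_unlock(&dev->mr_srcu, idx);
}

SRCU fits here because page-fault handlers may sleep while pinning pages, which rules out a plain RCU read-side critical section.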