aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband
diff options
context:
space:
mode:
authorMatan Barak <matanb@mellanox.com>2014-07-31 04:01:30 -0400
committerRoland Dreier <roland@purestorage.com>2014-08-01 18:11:13 -0400
commit9376932d0c26d5f5f89c95d5bd45123bba96d3a9 (patch)
tree5cd00b06c206e48c40d885b9eb6291c3593fd673 /drivers/infiniband
parente630664c8383f300c4146d7613d61e5a8eb1f8e3 (diff)
IB/mlx4_ib: Add support for user MR re-registration
This enables the user to change the protection domain, access flags and translation (address and length) of the MR. Use basic mlx4_core helper functions to get, update and set MPT and MTT objects according to the required modifications.

Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h4
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c88
3 files changed, 93 insertions, 1 deletion
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0f7027e7db13..828a37b24816 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2007,6 +2007,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2007 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | 2007 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2008 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | 2008 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2009 (1ull << IB_USER_VERBS_CMD_REG_MR) | 2009 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2010 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
2010 (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 2011 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2011 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 2012 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2012 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 2013 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
@@ -2059,6 +2060,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2059 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq; 2060 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
2060 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr; 2061 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
2061 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr; 2062 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
2063 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
2062 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr; 2064 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
2063 ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr; 2065 ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
2064 ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list; 2066 ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 369da3ca5d64..e8cad3926bfc 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -788,5 +788,9 @@ int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
788void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count); 788void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
789int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, 789int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
790 int is_attach); 790 int is_attach);
791int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
792 u64 start, u64 length, u64 virt_addr,
793 int mr_access_flags, struct ib_pd *pd,
794 struct ib_udata *udata);
791 795
792#endif /* MLX4_IB_H */ 796#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index cb2a8727f3fb..9b0e80e59b08 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -144,8 +144,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
144 if (!mr) 144 if (!mr)
145 return ERR_PTR(-ENOMEM); 145 return ERR_PTR(-ENOMEM);
146 146
147 /* Force registering the memory as writable. */
148 /* Used for memory re-registration. HCA protects the access */
147 mr->umem = ib_umem_get(pd->uobject->context, start, length, 149 mr->umem = ib_umem_get(pd->uobject->context, start, length,
148 access_flags, 0); 150 access_flags | IB_ACCESS_LOCAL_WRITE, 0);
149 if (IS_ERR(mr->umem)) { 151 if (IS_ERR(mr->umem)) {
150 err = PTR_ERR(mr->umem); 152 err = PTR_ERR(mr->umem);
151 goto err_free; 153 goto err_free;
@@ -183,6 +185,90 @@ err_free:
183 return ERR_PTR(err); 185 return ERR_PTR(err);
184} 186}
185 187
188int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
189 u64 start, u64 length, u64 virt_addr,
190 int mr_access_flags, struct ib_pd *pd,
191 struct ib_udata *udata)
192{
193 struct mlx4_ib_dev *dev = to_mdev(mr->device);
194 struct mlx4_ib_mr *mmr = to_mmr(mr);
195 struct mlx4_mpt_entry *mpt_entry;
196 struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
197 int err;
198
199 /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
200 * we assume that the calls can't run concurrently. Otherwise, a
201 * race exists.
202 */
203 err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
204
205 if (err)
206 return err;
207
208 if (flags & IB_MR_REREG_PD) {
209 err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
210 to_mpd(pd)->pdn);
211
212 if (err)
213 goto release_mpt_entry;
214 }
215
216 if (flags & IB_MR_REREG_ACCESS) {
217 err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
218 convert_access(mr_access_flags));
219
220 if (err)
221 goto release_mpt_entry;
222 }
223
224 if (flags & IB_MR_REREG_TRANS) {
225 int shift;
226 int err;
227 int n;
228
229 mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
230 ib_umem_release(mmr->umem);
231 mmr->umem = ib_umem_get(mr->uobject->context, start, length,
232 mr_access_flags |
233 IB_ACCESS_LOCAL_WRITE,
234 0);
235 if (IS_ERR(mmr->umem)) {
236 err = PTR_ERR(mmr->umem);
237 mmr->umem = NULL;
238 goto release_mpt_entry;
239 }
240 n = ib_umem_page_count(mmr->umem);
241 shift = ilog2(mmr->umem->page_size);
242
243 mmr->mmr.iova = virt_addr;
244 mmr->mmr.size = length;
245 err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
246 virt_addr, length, n, shift,
247 *pmpt_entry);
248 if (err) {
249 ib_umem_release(mmr->umem);
250 goto release_mpt_entry;
251 }
252
253 err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
254 if (err) {
255 mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
256 ib_umem_release(mmr->umem);
257 goto release_mpt_entry;
258 }
259 }
260
261 /* If we couldn't transfer the MR to the HCA, just remember to
262 * return a failure. But dereg_mr will free the resources.
263 */
264 err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
265
266release_mpt_entry:
267 mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
268
269 return err;
270}
271
186int mlx4_ib_dereg_mr(struct ib_mr *ibmr) 272int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
187{ 273{
188 struct mlx4_ib_mr *mr = to_mmr(ibmr); 274 struct mlx4_ib_mr *mr = to_mmr(ibmr);