path: root/drivers/infiniband
author	Shani Michaeli <shanim@mellanox.com>	2013-02-06 11:19:14 -0500
committer	Roland Dreier <roland@purestorage.com>	2013-02-25 13:44:32 -0500
commit	804d6a89a5c0b076317966bcbcd7a63d42241831 (patch)
tree	07360d8b8c6f099bfc5007b951844a133d4c43bd /drivers/infiniband
parent	e448834e3545e02789897ab68905220aea39cd40 (diff)
mlx4: Implement memory windows allocation and deallocation
Implement MW allocation and deallocation in mlx4_core and mlx4_ib. Pass
down the enable bind flag when registering memory regions.

Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Shani Michaeli <shanim@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
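For context, a minimal consumer-side sketch (not part of this patch; the helper name example_mw_alloc is hypothetical): a kernel ULP allocating a type 2 memory window through the ib_verbs layer, which with this patch is dispatched to mlx4_ib_alloc_mw() and mlx4_ib_dealloc_mw() on mlx4 devices.

#include <rdma/ib_verbs.h>

/* Hypothetical ULP helper: allocate and immediately release a type 2 MW. */
static int example_mw_alloc(struct ib_pd *pd)
{
	struct ib_mw *mw;

	/* Reaches mlx4_ib_alloc_mw() on mlx4 hardware. */
	mw = ib_alloc_mw(pd, IB_MW_TYPE_2);
	if (IS_ERR(mw))
		return PTR_ERR(mw);

	/*
	 * A real consumer would post a bind work request here, targeting an
	 * MR that was registered with IB_ACCESS_MW_BIND.
	 */

	/* Reaches mlx4_ib_dealloc_mw(). */
	return ib_dealloc_mw(mw);
}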
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/mlx4/mlx4_ib.h	12
-rw-r--r--	drivers/infiniband/hw/mlx4/mr.c		52
2 files changed, 64 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index cce5dde94bc1..9ba0aaf3a58e 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -116,6 +116,11 @@ struct mlx4_ib_mr {
 	struct ib_umem	       *umem;
 };
 
+struct mlx4_ib_mw {
+	struct ib_mw		ibmw;
+	struct mlx4_mw		mmw;
+};
+
 struct mlx4_ib_fast_reg_page_list {
 	struct ib_fast_reg_page_list	ibfrpl;
 	__be64			       *mapped_page_list;
@@ -533,6 +538,11 @@ static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
 	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
 }
 
+static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
+{
+	return container_of(ibmw, struct mlx4_ib_mw, ibmw);
+}
+
 static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
 {
 	return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
@@ -581,6 +591,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
 				  struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+int mlx4_ib_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 					int max_page_list_len);
 struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 254e1cf26439..5adf4c47ee18 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -41,9 +41,19 @@ static u32 convert_access(int acc)
 	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) |
 	       (acc & IB_ACCESS_REMOTE_READ  ? MLX4_PERM_REMOTE_READ  : 0) |
 	       (acc & IB_ACCESS_LOCAL_WRITE  ? MLX4_PERM_LOCAL_WRITE  : 0) |
+	       (acc & IB_ACCESS_MW_BIND      ? MLX4_PERM_BIND_MW      : 0) |
 	       MLX4_PERM_LOCAL_READ;
 }
 
+static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
+{
+	switch (type) {
+	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
+	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
+	default:		return -1;
+	}
+}
+
 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	struct mlx4_ib_mr *mr;
@@ -189,6 +199,48 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
 	return 0;
 }
 
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+{
+	struct mlx4_ib_dev *dev = to_mdev(pd->device);
+	struct mlx4_ib_mw *mw;
+	int err;
+
+	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
+	if (!mw)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
+			    to_mlx4_type(type), &mw->mmw);
+	if (err)
+		goto err_free;
+
+	err = mlx4_mw_enable(dev->dev, &mw->mmw);
+	if (err)
+		goto err_mw;
+
+	mw->ibmw.rkey = mw->mmw.key;
+
+	return &mw->ibmw;
+
+err_mw:
+	mlx4_mw_free(dev->dev, &mw->mmw);
+
+err_free:
+	kfree(mw);
+
+	return ERR_PTR(err);
+}
+
+int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
+{
+	struct mlx4_ib_mw *mw = to_mmw(ibmw);
+
+	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
+	kfree(mw);
+
+	return 0;
+}
+
 struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 					int max_page_list_len)
 {
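On the IB_ACCESS_MW_BIND/MLX4_PERM_BIND_MW change in convert_access() above, a short sketch (not part of this patch; the helper name example_bind_enabled_mr is hypothetical) of how a kernel consumer would register a region that windows can later be bound to:

#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper: a DMA MR registered with IB_ACCESS_MW_BIND, which
 * convert_access() now translates into MLX4_PERM_BIND_MW for the HCA.
 */
static struct ib_mr *example_bind_enabled_mr(struct ib_pd *pd)
{
	return ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_MW_BIND);
}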