diff options
author | Shani Michaeli <shanim@mellanox.com> | 2013-02-06 11:19:14 -0500 |
---|---|---|
committer | Roland Dreier <roland@purestorage.com> | 2013-02-25 13:44:32 -0500 |
commit | 804d6a89a5c0b076317966bcbcd7a63d42241831 (patch) | |
tree | 07360d8b8c6f099bfc5007b951844a133d4c43bd | |
parent | e448834e3545e02789897ab68905220aea39cd40 (diff) |
mlx4: Implement memory windows allocation and deallocation
Implement MW allocation and deallocation in mlx4_core and mlx4_ib.
Pass down the enable bind flag when registering memory regions.
Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Shani Michaeli <shanim@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r-- | drivers/infiniband/hw/mlx4/mlx4_ib.h | 12 | ||||
-rw-r--r-- | drivers/infiniband/hw/mlx4/mr.c | 52 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/mr.c | 95 | ||||
-rw-r--r-- | include/linux/mlx4/device.h | 20 |
4 files changed, 178 insertions, 1 deletion
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index cce5dde94bc1..9ba0aaf3a58e 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -116,6 +116,11 @@ struct mlx4_ib_mr { | |||
116 | struct ib_umem *umem; | 116 | struct ib_umem *umem; |
117 | }; | 117 | }; |
118 | 118 | ||
119 | struct mlx4_ib_mw { | ||
120 | struct ib_mw ibmw; | ||
121 | struct mlx4_mw mmw; | ||
122 | }; | ||
123 | |||
119 | struct mlx4_ib_fast_reg_page_list { | 124 | struct mlx4_ib_fast_reg_page_list { |
120 | struct ib_fast_reg_page_list ibfrpl; | 125 | struct ib_fast_reg_page_list ibfrpl; |
121 | __be64 *mapped_page_list; | 126 | __be64 *mapped_page_list; |
@@ -533,6 +538,11 @@ static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr) | |||
533 | return container_of(ibmr, struct mlx4_ib_mr, ibmr); | 538 | return container_of(ibmr, struct mlx4_ib_mr, ibmr); |
534 | } | 539 | } |
535 | 540 | ||
541 | static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw) | ||
542 | { | ||
543 | return container_of(ibmw, struct mlx4_ib_mw, ibmw); | ||
544 | } | ||
545 | |||
536 | static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl) | 546 | static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl) |
537 | { | 547 | { |
538 | return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl); | 548 | return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl); |
@@ -581,6 +591,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
581 | u64 virt_addr, int access_flags, | 591 | u64 virt_addr, int access_flags, |
582 | struct ib_udata *udata); | 592 | struct ib_udata *udata); |
583 | int mlx4_ib_dereg_mr(struct ib_mr *mr); | 593 | int mlx4_ib_dereg_mr(struct ib_mr *mr); |
594 | struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type); | ||
595 | int mlx4_ib_dealloc_mw(struct ib_mw *mw); | ||
584 | struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, | 596 | struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, |
585 | int max_page_list_len); | 597 | int max_page_list_len); |
586 | struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, | 598 | struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, |
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 254e1cf26439..5adf4c47ee18 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
@@ -41,9 +41,19 @@ static u32 convert_access(int acc) | |||
41 | (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) | | 41 | (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) | |
42 | (acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) | | 42 | (acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) | |
43 | (acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) | | 43 | (acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) | |
44 | (acc & IB_ACCESS_MW_BIND ? MLX4_PERM_BIND_MW : 0) | | ||
44 | MLX4_PERM_LOCAL_READ; | 45 | MLX4_PERM_LOCAL_READ; |
45 | } | 46 | } |
46 | 47 | ||
48 | static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type) | ||
49 | { | ||
50 | switch (type) { | ||
51 | case IB_MW_TYPE_1: return MLX4_MW_TYPE_1; | ||
52 | case IB_MW_TYPE_2: return MLX4_MW_TYPE_2; | ||
53 | default: return -1; | ||
54 | } | ||
55 | } | ||
56 | |||
47 | struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc) | 57 | struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc) |
48 | { | 58 | { |
49 | struct mlx4_ib_mr *mr; | 59 | struct mlx4_ib_mr *mr; |
@@ -189,6 +199,48 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr) | |||
189 | return 0; | 199 | return 0; |
190 | } | 200 | } |
191 | 201 | ||
202 | struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) | ||
203 | { | ||
204 | struct mlx4_ib_dev *dev = to_mdev(pd->device); | ||
205 | struct mlx4_ib_mw *mw; | ||
206 | int err; | ||
207 | |||
208 | mw = kmalloc(sizeof(*mw), GFP_KERNEL); | ||
209 | if (!mw) | ||
210 | return ERR_PTR(-ENOMEM); | ||
211 | |||
212 | err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn, | ||
213 | to_mlx4_type(type), &mw->mmw); | ||
214 | if (err) | ||
215 | goto err_free; | ||
216 | |||
217 | err = mlx4_mw_enable(dev->dev, &mw->mmw); | ||
218 | if (err) | ||
219 | goto err_mw; | ||
220 | |||
221 | mw->ibmw.rkey = mw->mmw.key; | ||
222 | |||
223 | return &mw->ibmw; | ||
224 | |||
225 | err_mw: | ||
226 | mlx4_mw_free(dev->dev, &mw->mmw); | ||
227 | |||
228 | err_free: | ||
229 | kfree(mw); | ||
230 | |||
231 | return ERR_PTR(err); | ||
232 | } | ||
233 | |||
234 | int mlx4_ib_dealloc_mw(struct ib_mw *ibmw) | ||
235 | { | ||
236 | struct mlx4_ib_mw *mw = to_mmw(ibmw); | ||
237 | |||
238 | mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw); | ||
239 | kfree(mw); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | |||
192 | struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, | 244 | struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, |
193 | int max_page_list_len) | 245 | int max_page_list_len) |
194 | { | 246 | { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 5e785bdcc694..602ca9bf78e4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
@@ -654,6 +654,101 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
654 | } | 654 | } |
655 | EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); | 655 | EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); |
656 | 656 | ||
657 | int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, | ||
658 | struct mlx4_mw *mw) | ||
659 | { | ||
660 | u32 index; | ||
661 | |||
662 | if ((type == MLX4_MW_TYPE_1 && | ||
663 | !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) || | ||
664 | (type == MLX4_MW_TYPE_2 && | ||
665 | !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN))) | ||
666 | return -ENOTSUPP; | ||
667 | |||
668 | index = mlx4_mpt_reserve(dev); | ||
669 | if (index == -1) | ||
670 | return -ENOMEM; | ||
671 | |||
672 | mw->key = hw_index_to_key(index); | ||
673 | mw->pd = pd; | ||
674 | mw->type = type; | ||
675 | mw->enabled = MLX4_MPT_DISABLED; | ||
676 | |||
677 | return 0; | ||
678 | } | ||
679 | EXPORT_SYMBOL_GPL(mlx4_mw_alloc); | ||
680 | |||
681 | int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) | ||
682 | { | ||
683 | struct mlx4_cmd_mailbox *mailbox; | ||
684 | struct mlx4_mpt_entry *mpt_entry; | ||
685 | int err; | ||
686 | |||
687 | err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key)); | ||
688 | if (err) | ||
689 | return err; | ||
690 | |||
691 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
692 | if (IS_ERR(mailbox)) { | ||
693 | err = PTR_ERR(mailbox); | ||
694 | goto err_table; | ||
695 | } | ||
696 | mpt_entry = mailbox->buf; | ||
697 | |||
698 | memset(mpt_entry, 0, sizeof(*mpt_entry)); | ||
699 | |||
700 | /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned | ||
701 | * off, thus creating a memory window and not a memory region. | ||
702 | */ | ||
703 | mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key)); | ||
704 | mpt_entry->pd_flags = cpu_to_be32(mw->pd); | ||
705 | if (mw->type == MLX4_MW_TYPE_2) { | ||
706 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); | ||
707 | mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP); | ||
708 | mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV); | ||
709 | } | ||
710 | |||
711 | err = mlx4_SW2HW_MPT(dev, mailbox, | ||
712 | key_to_hw_index(mw->key) & | ||
713 | (dev->caps.num_mpts - 1)); | ||
714 | if (err) { | ||
715 | mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); | ||
716 | goto err_cmd; | ||
717 | } | ||
718 | mw->enabled = MLX4_MPT_EN_HW; | ||
719 | |||
720 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
721 | |||
722 | return 0; | ||
723 | |||
724 | err_cmd: | ||
725 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
726 | |||
727 | err_table: | ||
728 | mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key)); | ||
729 | return err; | ||
730 | } | ||
731 | EXPORT_SYMBOL_GPL(mlx4_mw_enable); | ||
732 | |||
733 | void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw) | ||
734 | { | ||
735 | int err; | ||
736 | |||
737 | if (mw->enabled == MLX4_MPT_EN_HW) { | ||
738 | err = mlx4_HW2SW_MPT(dev, NULL, | ||
739 | key_to_hw_index(mw->key) & | ||
740 | (dev->caps.num_mpts - 1)); | ||
741 | if (err) | ||
742 | mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err); | ||
743 | |||
744 | mw->enabled = MLX4_MPT_EN_SW; | ||
745 | } | ||
746 | if (mw->enabled) | ||
747 | mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key)); | ||
748 | mlx4_mpt_release(dev, key_to_hw_index(mw->key)); | ||
749 | } | ||
750 | EXPORT_SYMBOL_GPL(mlx4_mw_free); | ||
751 | |||
657 | int mlx4_init_mr_table(struct mlx4_dev *dev) | 752 | int mlx4_init_mr_table(struct mlx4_dev *dev) |
658 | { | 753 | { |
659 | struct mlx4_priv *priv = mlx4_priv(dev); | 754 | struct mlx4_priv *priv = mlx4_priv(dev); |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index e9fe8caaf8bb..67b4695e5940 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -170,6 +170,7 @@ enum { | |||
170 | #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) | 170 | #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) |
171 | 171 | ||
172 | enum { | 172 | enum { |
173 | MLX4_BMME_FLAG_WIN_TYPE_2B = 1 << 1, | ||
173 | MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, | 174 | MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, |
174 | MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, | 175 | MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, |
175 | MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, | 176 | MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, |
@@ -237,7 +238,8 @@ enum { | |||
237 | MLX4_PERM_LOCAL_WRITE = 1 << 11, | 238 | MLX4_PERM_LOCAL_WRITE = 1 << 11, |
238 | MLX4_PERM_REMOTE_READ = 1 << 12, | 239 | MLX4_PERM_REMOTE_READ = 1 << 12, |
239 | MLX4_PERM_REMOTE_WRITE = 1 << 13, | 240 | MLX4_PERM_REMOTE_WRITE = 1 << 13, |
240 | MLX4_PERM_ATOMIC = 1 << 14 | 241 | MLX4_PERM_ATOMIC = 1 << 14, |
242 | MLX4_PERM_BIND_MW = 1 << 15, | ||
241 | }; | 243 | }; |
242 | 244 | ||
243 | enum { | 245 | enum { |
@@ -503,6 +505,18 @@ struct mlx4_mr { | |||
503 | int enabled; | 505 | int enabled; |
504 | }; | 506 | }; |
505 | 507 | ||
/*
 * mlx4 memory window types; mapped one-to-one from the IB core's
 * IB_MW_TYPE_1 / IB_MW_TYPE_2.
 */
enum mlx4_mw_type {
	MLX4_MW_TYPE_1	= 1,
	MLX4_MW_TYPE_2	= 2,
};
512 | |||
513 | struct mlx4_mw { | ||
514 | u32 key; | ||
515 | u32 pd; | ||
516 | enum mlx4_mw_type type; | ||
517 | int enabled; | ||
518 | }; | ||
519 | |||
506 | struct mlx4_fmr { | 520 | struct mlx4_fmr { |
507 | struct mlx4_mr mr; | 521 | struct mlx4_mr mr; |
508 | struct mlx4_mpt_entry *mpt; | 522 | struct mlx4_mpt_entry *mpt; |
@@ -803,6 +817,10 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, | |||
803 | int npages, int page_shift, struct mlx4_mr *mr); | 817 | int npages, int page_shift, struct mlx4_mr *mr); |
804 | int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr); | 818 | int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr); |
805 | int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr); | 819 | int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr); |
820 | int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, | ||
821 | struct mlx4_mw *mw); | ||
822 | void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw); | ||
823 | int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw); | ||
806 | int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 824 | int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
807 | int start_index, int npages, u64 *page_list); | 825 | int start_index, int npages, u64 *page_list); |
808 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 826 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |