author	Jack Morgenstein <jackm@dev.mellanox.co.il>	2014-07-28 16:30:22 -0400
committer	David S. Miller <davem@davemloft.net>	2014-07-30 17:00:06 -0400
commit	9603b61de1eee92977d74ff42541be20c0c5b1a7 (patch)
tree	a0663e54d33771416e124ba30b4c1fd918a587dd
parent	4ada97abe937cdb3fc029a871d5b0f21aa661a60 (diff)
mlx5: Move pci device handling from mlx5_ib to mlx5_core
In preparation for a new mlx5 device which is VPI (i.e., ports can be
either IB or ETH), move the pci device functionality from mlx5_ib to
mlx5_core.

This involves the following changes:

1. Move the mlx5_core_dev struct out of mlx5_ib_dev. mlx5_core_dev is
   now an independent structure maintained by mlx5_core, and mlx5_ib_dev
   holds a pointer to it. This requires changing a lot of places where
   the core_dev struct was accessed via mlx5_ib_dev (now, this needs to
   be a pointer dereference).
2. All PCI initializations are now done in mlx5_core. Thus, it is now
   mlx5_core which does pci_register_device (and not mlx5_ib, as was
   previously the case).
3. mlx5_ib now registers itself with mlx5_core as an "interface" driver.
   This is very similar to the mechanism employed for the mlx4
   (ConnectX) driver. Once the HCA is initialized (by mlx5_core), it
   invokes the interface drivers to do their initializations; a minimal
   sketch of this contract follows below.
4. There is a new event handler which the core registers:
   mlx5_core_event(). This event handler invokes the event handlers
   registered by the interfaces.

Based on a patch by Eli Cohen <eli@mellanox.com>

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
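For anyone porting an out-of-tree consumer to this scheme, the shape of the
new registration contract can be sketched as follows. This is a minimal,
hypothetical client distilled from the mlx5_ib changes in this patch:
struct mlx5_interface, mlx5_register_interface() and
mlx5_unregister_interface() are the API this commit introduces, while
my_state, my_add(), my_remove() and my_event() are placeholder names
invented for the example.

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/mlx5/driver.h>

    struct my_state {
            struct mlx5_core_dev *mdev;
    };

    /* .add runs only after mlx5_core has completed PCI and HCA bring-up;
     * whatever it returns is handed back as 'context' to .remove and
     * .event. mlx5_ib_add() in the diff below likewise returns NULL on
     * its error paths.
     */
    static void *my_add(struct mlx5_core_dev *mdev)
    {
            struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

            if (!st)
                    return NULL;
            st->mdev = mdev;
            return st;
    }

    static void my_remove(struct mlx5_core_dev *mdev, void *context)
    {
            kfree(context);
    }

    /* Fan-out target of the new mlx5_core_event() handler: the core
     * forwards HCA events to every registered interface.
     */
    static void my_event(struct mlx5_core_dev *mdev, void *context,
                         enum mlx5_dev_event event, void *data)
    {
            struct my_state *st = context;

            dev_info(&st->mdev->pdev->dev, "mlx5 event %d\n", event);
    }

    static struct mlx5_interface my_interface = {
            .add = my_add,
            .remove = my_remove,
            .event = my_event,
    };

    /* Module init/exit register with mlx5_core, not with the PCI bus. */
    static int __init my_init(void)
    {
            return mlx5_register_interface(&my_interface);
    }

    static void __exit my_exit(void)
    {
            mlx5_unregister_interface(&my_interface);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");

Compared with the old model, the client never touches pci_register_driver();
ordering is guaranteed by the core, which only calls .add once the HCA is
initialized.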
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c                  46
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c                  4
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c               281
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h             12
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c                  48
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c                  84
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c                 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  290
-rw-r--r--  include/linux/mlx5/driver.h                      17
9 files changed, 498 insertions, 310 deletions
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 8ae4f896cb41..3b4dc858cef9 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -180,7 +180,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
         struct mlx5_core_srq *msrq = NULL;
 
         if (qp->ibqp.xrcd) {
-                msrq = mlx5_core_get_srq(&dev->mdev,
+                msrq = mlx5_core_get_srq(dev->mdev,
                                          be32_to_cpu(cqe->srqn));
                 srq = to_mibsrq(msrq);
         } else {
@@ -364,7 +364,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-        mlx5_buf_free(&dev->mdev, &buf->buf);
+        mlx5_buf_free(dev->mdev, &buf->buf);
 }
 
 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -450,7 +450,7 @@ repoll:
          * because CQs will be locked while QPs are removed
          * from the table.
          */
-        mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
+        mqp = __mlx5_qp_lookup(dev->mdev, qpn);
         if (unlikely(!mqp)) {
                 mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
                              cq->mcq.cqn, qpn);
@@ -514,11 +514,11 @@ repoll:
         case MLX5_CQE_SIG_ERR:
                 sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
 
-                read_lock(&dev->mdev.priv.mr_table.lock);
-                mmr = __mlx5_mr_lookup(&dev->mdev,
+                read_lock(&dev->mdev->priv.mr_table.lock);
+                mmr = __mlx5_mr_lookup(dev->mdev,
                                        mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
                 if (unlikely(!mmr)) {
-                        read_unlock(&dev->mdev.priv.mr_table.lock);
+                        read_unlock(&dev->mdev->priv.mr_table.lock);
                         mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
                                      cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
                         return -EINVAL;
@@ -536,7 +536,7 @@ repoll:
                              mr->sig->err_item.expected,
                              mr->sig->err_item.actual);
 
-                read_unlock(&dev->mdev.priv.mr_table.lock);
+                read_unlock(&dev->mdev->priv.mr_table.lock);
                 goto repoll;
         }
 
@@ -575,8 +575,8 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
         mlx5_cq_arm(&to_mcq(ibcq)->mcq,
                     (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                     MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-                    to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
-                    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
+                    to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
+                    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));
 
         return 0;
 }
@@ -586,7 +586,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 {
         int err;
 
-        err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
+        err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
                              PAGE_SIZE * 2, &buf->buf);
         if (err)
                 return err;
@@ -691,7 +691,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 {
         int err;
 
-        err = mlx5_db_alloc(&dev->mdev, &cq->db);
+        err = mlx5_db_alloc(dev->mdev, &cq->db);
         if (err)
                 return err;
 
@@ -716,7 +716,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
         mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
 
         (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-        *index = dev->mdev.priv.uuari.uars[0].index;
+        *index = dev->mdev->priv.uuari.uars[0].index;
 
         return 0;
 
@@ -724,14 +724,14 @@ err_buf:
         free_cq_buf(dev, &cq->buf);
 
 err_db:
-        mlx5_db_free(&dev->mdev, &cq->db);
+        mlx5_db_free(dev->mdev, &cq->db);
         return err;
 }
 
 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
 {
         free_cq_buf(dev, &cq->buf);
-        mlx5_db_free(&dev->mdev, &cq->db);
+        mlx5_db_free(dev->mdev, &cq->db);
 }
 
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
                 return ERR_PTR(-EINVAL);
 
         entries = roundup_pow_of_two(entries + 1);
-        if (entries > dev->mdev.caps.max_cqes)
+        if (entries > dev->mdev->caps.max_cqes)
                 return ERR_PTR(-EINVAL);
 
         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -789,7 +789,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
         cqb->ctx.c_eqn = cpu_to_be16(eqn);
         cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
 
-        err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
+        err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
         if (err)
                 goto err_cqb;
 
@@ -809,7 +809,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
         return &cq->ibcq;
 
 err_cmd:
-        mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
+        mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
 
 err_cqb:
         mlx5_vfree(cqb);
@@ -834,7 +834,7 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
         if (cq->uobject)
                 context = cq->uobject->context;
 
-        mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
+        mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
         if (context)
                 destroy_cq_user(mcq, context);
         else
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
         int err;
         u32 fsel;
 
-        if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+        if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
                 return -ENOSYS;
 
         in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -931,7 +931,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
         in->ctx.cq_period = cpu_to_be16(cq_period);
         in->ctx.cq_max_count = cpu_to_be16(cq_count);
         in->field_select = cpu_to_be32(fsel);
-        err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
+        err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
         kfree(in);
 
         if (err)
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
         int uninitialized_var(cqe_size);
         unsigned long flags;
 
-        if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+        if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
                 pr_info("Firmware does not support resize CQ\n");
                 return -ENOSYS;
         }
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                 return -EINVAL;
 
         entries = roundup_pow_of_two(entries + 1);
-        if (entries > dev->mdev.caps.max_cqes + 1)
+        if (entries > dev->mdev->caps.max_cqes + 1)
                 return -EINVAL;
 
         if (entries == ibcq->cqe + 1)
@@ -1128,7 +1128,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
         in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
         in->cqn = cpu_to_be32(cq->mcq.cqn);
 
-        err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+        err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
         if (err)
                 goto ex_alloc;
 
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 5c8938be0e08..e259e7393152 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -54,7 +54,7 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
         if (ignore_bkey || !in_wc)
                 op_modifier |= 0x2;
 
-        return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port);
+        return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
 }
 
 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
 
         packet_error = be16_to_cpu(out_mad->status);
 
-        dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
+        dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
                 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
 
 out:
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 364d4b6937f5..f2cfd363a705 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -54,96 +54,17 @@ MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRIVER_VERSION);
 
-static int prof_sel = 2;
-module_param_named(prof_sel, prof_sel, int, 0444);
-MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+static int deprecated_prof_sel = 2;
+module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
+MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");
 
 static char mlx5_version[] =
         DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
         DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
 
-static struct mlx5_profile profile[] = {
-        [0] = {
-                .mask = 0,
-        },
-        [1] = {
-                .mask = MLX5_PROF_MASK_QP_SIZE,
-                .log_max_qp = 12,
-        },
-        [2] = {
-                .mask = MLX5_PROF_MASK_QP_SIZE |
-                        MLX5_PROF_MASK_MR_CACHE,
-                .log_max_qp = 17,
-                .mr_cache[0] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[1] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[2] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[3] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[4] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[5] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[6] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[7] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[8] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[9] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[10] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[11] = {
-                        .size = 500,
-                        .limit = 250
-                },
-                .mr_cache[12] = {
-                        .size = 64,
-                        .limit = 32
-                },
-                .mr_cache[13] = {
-                        .size = 32,
-                        .limit = 16
-                },
-                .mr_cache[14] = {
-                        .size = 16,
-                        .limit = 8
-                },
-                .mr_cache[15] = {
-                        .size = 8,
-                        .limit = 4
-                },
-        },
-};
-
 int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 {
-        struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+        struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
         struct mlx5_eq *eq, *n;
         int err = -ENOENT;
 
@@ -163,7 +84,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
-        struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+        struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
         char name[MLX5_MAX_EQ_NAME];
         struct mlx5_eq *eq, *n;
         int ncomp_vec;
@@ -182,9 +103,9 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
                 }
 
                 snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
-                err = mlx5_create_map_eq(&dev->mdev, eq,
+                err = mlx5_create_map_eq(dev->mdev, eq,
                                          i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-                                         name, &dev->mdev.priv.uuari.uars[0]);
+                                         name, &dev->mdev->priv.uuari.uars[0]);
                 if (err) {
                         kfree(eq);
                         goto clean;
@@ -204,7 +125,7 @@ clean:
         list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
                 list_del(&eq->list);
                 spin_unlock(&table->lock);
-                if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
+                if (mlx5_destroy_unmap_eq(dev->mdev, eq))
                         mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
                 kfree(eq);
                 spin_lock(&table->lock);
@@ -215,14 +136,14 @@ clean:
 
 static void free_comp_eqs(struct mlx5_ib_dev *dev)
 {
-        struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+        struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
         struct mlx5_eq *eq, *n;
 
         spin_lock(&table->lock);
         list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
                 list_del(&eq->list);
                 spin_unlock(&table->lock);
-                if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
+                if (mlx5_destroy_unmap_eq(dev->mdev, eq))
                         mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
                 kfree(eq);
                 spin_lock(&table->lock);
@@ -255,14 +176,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 
         memset(props, 0, sizeof(*props));
 
-        props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) |
-                (fw_rev_min(&dev->mdev) << 16) |
-                fw_rev_sub(&dev->mdev);
+        props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
+                (fw_rev_min(dev->mdev) << 16) |
+                fw_rev_sub(dev->mdev);
         props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
                 IB_DEVICE_PORT_ACTIVE_EVENT |
                 IB_DEVICE_SYS_IMAGE_GUID |
                 IB_DEVICE_RC_RNR_NAK_GEN;
-        flags = dev->mdev.caps.flags;
+        flags = dev->mdev->caps.flags;
         if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
                 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
         if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@@ -292,30 +213,30 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
         memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
 
         props->max_mr_size = ~0ull;
-        props->page_size_cap = dev->mdev.caps.min_page_sz;
-        props->max_qp = 1 << dev->mdev.caps.log_max_qp;
-        props->max_qp_wr = dev->mdev.caps.max_wqes;
-        max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
-        max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
+        props->page_size_cap = dev->mdev->caps.min_page_sz;
+        props->max_qp = 1 << dev->mdev->caps.log_max_qp;
+        props->max_qp_wr = dev->mdev->caps.max_wqes;
+        max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
+        max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
                 sizeof(struct mlx5_wqe_data_seg);
         props->max_sge = min(max_rq_sg, max_sq_sg);
-        props->max_cq = 1 << dev->mdev.caps.log_max_cq;
-        props->max_cqe = dev->mdev.caps.max_cqes - 1;
-        props->max_mr = 1 << dev->mdev.caps.log_max_mkey;
-        props->max_pd = 1 << dev->mdev.caps.log_max_pd;
-        props->max_qp_rd_atom = dev->mdev.caps.max_ra_req_qp;
-        props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp;
+        props->max_cq = 1 << dev->mdev->caps.log_max_cq;
+        props->max_cqe = dev->mdev->caps.max_cqes - 1;
+        props->max_mr = 1 << dev->mdev->caps.log_max_mkey;
+        props->max_pd = 1 << dev->mdev->caps.log_max_pd;
+        props->max_qp_rd_atom = dev->mdev->caps.max_ra_req_qp;
+        props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp;
         props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
-        props->max_srq = 1 << dev->mdev.caps.log_max_srq;
-        props->max_srq_wr = dev->mdev.caps.max_srq_wqes - 1;
+        props->max_srq = 1 << dev->mdev->caps.log_max_srq;
+        props->max_srq_wr = dev->mdev->caps.max_srq_wqes - 1;
         props->max_srq_sge = max_rq_sg - 1;
         props->max_fast_reg_page_list_len = (unsigned int)-1;
-        props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
+        props->local_ca_ack_delay = dev->mdev->caps.local_ca_ack_delay;
         props->atomic_cap = IB_ATOMIC_NONE;
         props->masked_atomic_cap = IB_ATOMIC_NONE;
         props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
-        props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
-        props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
+        props->max_mcast_grp = 1 << dev->mdev->caps.log_max_mcg;
+        props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg;
         props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                 props->max_mcast_grp;
         props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
@@ -336,7 +257,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
         int ext_active_speed;
         int err = -ENOMEM;
 
-        if (port < 1 || port > dev->mdev.caps.num_ports) {
+        if (port < 1 || port > dev->mdev->caps.num_ports) {
                 mlx5_ib_warn(dev, "invalid port number %d\n", port);
                 return -EINVAL;
         }
@@ -367,8 +288,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
         props->phys_state = out_mad->data[33] >> 4;
         props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
         props->gid_tbl_len = out_mad->data[50];
-        props->max_msg_sz = 1 << to_mdev(ibdev)->mdev.caps.log_max_msg;
-        props->pkey_tbl_len = to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len;
+        props->max_msg_sz = 1 << to_mdev(ibdev)->mdev->caps.log_max_msg;
+        props->pkey_tbl_len = to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len;
         props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
         props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
         props->active_width = out_mad->data[31] & 0xf;
@@ -395,7 +316,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 
         /* If reported active speed is QDR, check if is FDR-10 */
         if (props->active_speed == 4) {
-                if (dev->mdev.caps.ext_port_cap[port - 1] &
+                if (dev->mdev->caps.ext_port_cap[port - 1] &
                     MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
                         init_query_mad(in_mad);
                         in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -508,7 +429,7 @@ static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
          * a 144 trap. If cmd fails, just ignore.
          */
         memcpy(&in, props->node_desc, 64);
-        err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out,
+        err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
                                    sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
         if (err)
                 return err;
@@ -535,7 +456,7 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
         tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
                 ~props->clr_port_cap_mask;
 
-        err = mlx5_set_port_caps(&dev->mdev, port, tmp);
+        err = mlx5_set_port_caps(dev->mdev, port, tmp);
 
 out:
         mutex_unlock(&dev->cap_mask_mutex);
@@ -591,14 +512,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
         num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
         gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
-        resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp;
-        resp.bf_reg_size = dev->mdev.caps.bf_reg_size;
+        resp.qp_tab_size = 1 << dev->mdev->caps.log_max_qp;
+        resp.bf_reg_size = dev->mdev->caps.bf_reg_size;
         resp.cache_line_size = L1_CACHE_BYTES;
-        resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz;
-        resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz;
-        resp.max_send_wqebb = dev->mdev.caps.max_wqes;
-        resp.max_recv_wr = dev->mdev.caps.max_wqes;
-        resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes;
+        resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz;
+        resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz;
+        resp.max_send_wqebb = dev->mdev->caps.max_wqes;
+        resp.max_recv_wr = dev->mdev->caps.max_wqes;
+        resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes;
 
         context = kzalloc(sizeof(*context), GFP_KERNEL);
         if (!context)
@@ -635,7 +556,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
         }
 
         for (i = 0; i < num_uars; i++) {
-                err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index);
+                err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
                 if (err)
                         goto out_count;
         }
@@ -644,7 +565,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
         mutex_init(&context->db_page_mutex);
 
         resp.tot_uuars = req.total_num_uuars;
-        resp.num_ports = dev->mdev.caps.num_ports;
+        resp.num_ports = dev->mdev->caps.num_ports;
         err = ib_copy_to_udata(udata, &resp,
                                sizeof(resp) - sizeof(resp.reserved));
         if (err)
@@ -658,7 +579,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
 out_uars:
         for (i--; i >= 0; i--)
-                mlx5_cmd_free_uar(&dev->mdev, uars[i].index);
+                mlx5_cmd_free_uar(dev->mdev, uars[i].index);
 out_count:
         kfree(uuari->count);
 
@@ -681,7 +602,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
         int i;
 
         for (i = 0; i < uuari->num_uars; i++) {
-                if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index))
+                if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
                         mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
         }
 
@@ -695,7 +616,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 
 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
 {
-        return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index;
+        return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
 }
 
 static int get_command(unsigned long offset)
@@ -773,7 +694,7 @@ static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
         seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
         seg->start_addr = 0;
 
-        err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in),
+        err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
                                     NULL, NULL, NULL);
         if (err) {
                 mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
@@ -798,7 +719,7 @@ static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
 
         memset(&mr, 0, sizeof(mr));
         mr.key = key;
-        err = mlx5_core_destroy_mkey(&dev->mdev, &mr);
+        err = mlx5_core_destroy_mkey(dev->mdev, &mr);
         if (err)
                 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
 }
@@ -815,7 +736,7 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
         if (!pd)
                 return ERR_PTR(-ENOMEM);
 
-        err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn);
+        err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
         if (err) {
                 kfree(pd);
                 return ERR_PTR(err);
@@ -824,14 +745,14 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
         if (context) {
                 resp.pdn = pd->pdn;
                 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
-                        mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
+                        mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
                         kfree(pd);
                         return ERR_PTR(-EFAULT);
                 }
         } else {
                 err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
                 if (err) {
-                        mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
+                        mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
                         kfree(pd);
                         return ERR_PTR(err);
                 }
@@ -848,7 +769,7 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
         if (!pd->uobject)
                 free_pa_mkey(mdev, mpd->pa_lkey);
 
-        mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn);
+        mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
         kfree(mpd);
 
         return 0;
@@ -859,7 +780,7 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
         int err;
 
-        err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num);
+        err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
         if (err)
                 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
                              ibqp->qp_num, gid->raw);
@@ -872,7 +793,7 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
         int err;
 
-        err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num);
+        err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
         if (err)
                 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
                              ibqp->qp_num, gid->raw);
@@ -906,7 +827,7 @@ static int init_node_data(struct mlx5_ib_dev *dev)
         if (err)
                 goto out;
 
-        dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
+        dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
         memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
 
 out:
@@ -921,7 +842,7 @@ static ssize_t show_fw_pages(struct device *device, struct device_attribute *att
         struct mlx5_ib_dev *dev =
                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
 
-        return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages);
+        return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
 }
 
 static ssize_t show_reg_pages(struct device *device,
@@ -930,7 +851,7 @@ static ssize_t show_reg_pages(struct device *device,
         struct mlx5_ib_dev *dev =
                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
 
-        return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages);
+        return sprintf(buf, "%d\n", dev->mdev->priv.reg_pages);
 }
 
 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
@@ -938,7 +859,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
 {
         struct mlx5_ib_dev *dev =
                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-        return sprintf(buf, "MT%d\n", dev->mdev.pdev->device);
+        return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
 }
 
 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@@ -946,8 +867,8 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
 {
         struct mlx5_ib_dev *dev =
                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-        return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev),
-                       fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev));
+        return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+                       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
@@ -955,7 +876,7 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
 {
         struct mlx5_ib_dev *dev =
                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-        return sprintf(buf, "%x\n", dev->mdev.rev_id);
+        return sprintf(buf, "%x\n", dev->mdev->rev_id);
 }
 
 static ssize_t show_board(struct device *device, struct device_attribute *attr,
@@ -964,7 +885,7 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr,
         struct mlx5_ib_dev *dev =
                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
         return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
-                       dev->mdev.board_id);
+                       dev->mdev->board_id);
 }
 
 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -983,11 +904,12 @@ static struct device_attribute *mlx5_class_attributes[] = {
         &dev_attr_reg_pages,
 };
 
-static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
-                          void *data)
+static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+                          enum mlx5_dev_event event, void *data)
 {
-        struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev);
+        struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
         struct ib_event ibev;
+
         u8 port = 0;
 
         switch (event) {
@@ -1047,7 +969,7 @@ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
 {
         int port;
 
-        for (port = 1; port <= dev->mdev.caps.num_ports; port++)
+        for (port = 1; port <= dev->mdev->caps.num_ports; port++)
                 mlx5_query_ext_port_caps(dev, port);
 }
 
@@ -1072,14 +994,14 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
                 goto out;
         }
 
-        for (port = 1; port <= dev->mdev.caps.num_ports; port++) {
+        for (port = 1; port <= dev->mdev->caps.num_ports; port++) {
                 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
                 if (err) {
                         mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
                         break;
                 }
-                dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
-                dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
+                dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
+                dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
                 mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
                             dprops->max_pkeys, pprops->gid_tbl_len);
         }
@@ -1328,10 +1250,8 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
         mlx5_ib_dealloc_pd(devr->p0);
 }
 
-static int init_one(struct pci_dev *pdev,
-                    const struct pci_device_id *id)
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
-        struct mlx5_core_dev *mdev;
         struct mlx5_ib_dev *dev;
         int err;
         int i;
@@ -1340,28 +1260,19 @@ static int init_one(struct pci_dev *pdev,
 
         dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
         if (!dev)
-                return -ENOMEM;
+                return NULL;
 
-        mdev = &dev->mdev;
-        mdev->event = mlx5_ib_event;
-        if (prof_sel >= ARRAY_SIZE(profile)) {
-                pr_warn("selected pofile out of range, selceting default\n");
-                prof_sel = 0;
-        }
-        mdev->profile = &profile[prof_sel];
-        err = mlx5_dev_init(mdev, pdev);
-        if (err)
-                goto err_free;
+        dev->mdev = mdev;
 
         err = get_port_caps(dev);
         if (err)
-                goto err_cleanup;
+                goto err_dealloc;
 
         get_ext_port_caps(dev);
 
         err = alloc_comp_eqs(dev);
         if (err)
-                goto err_cleanup;
+                goto err_dealloc;
 
         MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
 
@@ -1480,7 +1391,7 @@ static int init_one(struct pci_dev *pdev,
 
         dev->ib_active = true;
 
-        return 0;
+        return dev;
 
 err_umrc:
         destroy_umrc_res(dev);
@@ -1494,49 +1405,39 @@ err_rsrc:
 err_eqs:
         free_comp_eqs(dev);
 
-err_cleanup:
-        mlx5_dev_cleanup(mdev);
-
-err_free:
+err_dealloc:
         ib_dealloc_device((struct ib_device *)dev);
 
-        return err;
+        return NULL;
 }
 
-static void remove_one(struct pci_dev *pdev)
+static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 {
-        struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev);
-
+        struct mlx5_ib_dev *dev = context;
         destroy_umrc_res(dev);
         ib_unregister_device(&dev->ib_dev);
         destroy_dev_resources(&dev->devr);
         free_comp_eqs(dev);
-        mlx5_dev_cleanup(&dev->mdev);
         ib_dealloc_device(&dev->ib_dev);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = {
-        { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
-        { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table);
-
-static struct pci_driver mlx5_ib_driver = {
-        .name = DRIVER_NAME,
-        .id_table = mlx5_ib_pci_table,
-        .probe = init_one,
-        .remove = remove_one
+static struct mlx5_interface mlx5_ib_interface = {
+        .add = mlx5_ib_add,
+        .remove = mlx5_ib_remove,
+        .event = mlx5_ib_event,
 };
 
 static int __init mlx5_ib_init(void)
 {
-        return pci_register_driver(&mlx5_ib_driver);
+        if (deprecated_prof_sel != 2)
+                pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
+
+        return mlx5_register_interface(&mlx5_ib_interface);
 }
 
 static void __exit mlx5_ib_cleanup(void)
 {
-        pci_unregister_driver(&mlx5_ib_driver);
+        mlx5_unregister_interface(&mlx5_ib_interface);
 }
 
 module_init(mlx5_ib_init);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f2ccf1a5a291..a0e204ffe367 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -360,7 +360,7 @@ struct mlx5_ib_resources {
 
 struct mlx5_ib_dev {
         struct ib_device ib_dev;
-        struct mlx5_core_dev mdev;
+        struct mlx5_core_dev *mdev;
         MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
         struct list_head eqs_list;
         int num_ports;
@@ -454,16 +454,6 @@ static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
         return container_of(ibah, struct mlx5_ib_ah, ibah);
 }
 
-static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev)
-{
-        return container_of(dev, struct mlx5_ib_dev, mdev);
-}
-
-static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev)
-{
-        return mlx5_core2ibdev(pci2mlx5_core_dev(pdev));
-}
-
 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
                         struct mlx5_db *db);
 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index afa873bd028e..80b3c63eab5d 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -73,7 +73,7 @@ static void reg_mr_callback(int status, void *context)
         struct mlx5_cache_ent *ent = &cache->ent[c];
         u8 key;
         unsigned long flags;
-        struct mlx5_mr_table *table = &dev->mdev.priv.mr_table;
+        struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
         int err;
 
         spin_lock_irqsave(&ent->lock, flags);
@@ -97,9 +97,9 @@ static void reg_mr_callback(int status, void *context)
                 return;
         }
 
-        spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags);
-        key = dev->mdev.priv.mkey_key++;
-        spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags);
+        spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
+        key = dev->mdev->priv.mkey_key++;
+        spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
         mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
 
         cache->last_add = jiffies;
@@ -155,7 +155,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
                 spin_lock_irq(&ent->lock);
                 ent->pending++;
                 spin_unlock_irq(&ent->lock);
-                err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
+                err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
                                             sizeof(*in), reg_mr_callback,
                                             mr, &mr->out);
                 if (err) {
@@ -188,7 +188,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
                 ent->cur--;
                 ent->size--;
                 spin_unlock_irq(&ent->lock);
-                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+                err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
                 if (err)
                         mlx5_ib_warn(dev, "failed destroy mkey\n");
                 else
@@ -479,7 +479,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
                 ent->cur--;
                 ent->size--;
                 spin_unlock_irq(&ent->lock);
-                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+                err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
                 if (err)
                         mlx5_ib_warn(dev, "failed destroy mkey\n");
                 else
@@ -496,7 +496,7 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
         if (!mlx5_debugfs_root)
                 return 0;
 
-        cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root);
+        cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
         if (!cache->root)
                 return -ENOMEM;
 
@@ -571,8 +571,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
                 ent->order = i + 2;
                 ent->dev = dev;
 
-                if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE)
-                        limit = dev->mdev.profile->mr_cache[i].limit;
+                if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
+                        limit = dev->mdev->profile->mr_cache[i].limit;
                 else
                         limit = 0;
 
@@ -610,7 +610,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 {
         struct mlx5_ib_dev *dev = to_mdev(pd->device);
-        struct mlx5_core_dev *mdev = &dev->mdev;
+        struct mlx5_core_dev *mdev = dev->mdev;
         struct mlx5_create_mkey_mbox_in *in;
         struct mlx5_mkey_seg *seg;
         struct mlx5_ib_mr *mr;
@@ -846,7 +846,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
         in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
         in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
                                                          1 << page_shift));
-        err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL,
+        err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
                                     NULL, NULL);
         if (err) {
                 mlx5_ib_warn(dev, "create mkey failed\n");
@@ -923,7 +923,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         mr->umem = umem;
         mr->npages = npages;
         spin_lock(&dev->mr_lock);
-        dev->mdev.priv.reg_pages += npages;
+        dev->mdev->priv.reg_pages += npages;
         spin_unlock(&dev->mr_lock);
         mr->ibmr.lkey = mr->mmr.key;
         mr->ibmr.rkey = mr->mmr.key;
@@ -978,7 +978,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
         int err;
 
         if (!umred) {
-                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+                err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
                 if (err) {
                         mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
                                      mr->mmr.key, err);
@@ -996,7 +996,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
         if (umem) {
                 ib_umem_release(umem);
                 spin_lock(&dev->mr_lock);
-                dev->mdev.priv.reg_pages -= npages;
+                dev->mdev->priv.reg_pages -= npages;
                 spin_unlock(&dev->mr_lock);
         }
 
@@ -1044,7 +1044,7 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
         }
 
         /* create mem & wire PSVs */
-        err = mlx5_core_create_psv(&dev->mdev, to_mpd(pd)->pdn,
+        err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
                                    2, psv_index);
         if (err)
                 goto err_free_sig;
@@ -1060,7 +1060,7 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
         }
 
         in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
-        err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in),
+        err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
                                     NULL, NULL, NULL);
         if (err)
                 goto err_destroy_psv;
@@ -1074,11 +1074,11 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
 
 err_destroy_psv:
         if (mr->sig) {
-                if (mlx5_core_destroy_psv(&dev->mdev,
+                if (mlx5_core_destroy_psv(dev->mdev,
                                           mr->sig->psv_memory.psv_idx))
                         mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                                      mr->sig->psv_memory.psv_idx);
-                if (mlx5_core_destroy_psv(&dev->mdev,
+                if (mlx5_core_destroy_psv(dev->mdev,
                                           mr->sig->psv_wire.psv_idx))
                         mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                                      mr->sig->psv_wire.psv_idx);
@@ -1099,18 +1099,18 @@ int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
         int err;
 
         if (mr->sig) {
-                if (mlx5_core_destroy_psv(&dev->mdev,
+                if (mlx5_core_destroy_psv(dev->mdev,
                                           mr->sig->psv_memory.psv_idx))
                         mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                                      mr->sig->psv_memory.psv_idx);
-                if (mlx5_core_destroy_psv(&dev->mdev,
+                if (mlx5_core_destroy_psv(dev->mdev,
                                           mr->sig->psv_wire.psv_idx))
                         mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                                      mr->sig->psv_wire.psv_idx);
                 kfree(mr->sig);
         }
 
-        err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+        err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
         if (err) {
                 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
                              mr->mmr.key, err);
@@ -1149,7 +1149,7 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
          * TBD not needed - issue 197292 */
         in->seg.log2_page_size = PAGE_SHIFT;
 
-        err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
+        err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
                                     NULL, NULL);
         kfree(in);
         if (err)
@@ -1202,7 +1202,7 @@ void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
         struct mlx5_ib_dev *dev = to_mdev(page_list->device);
         int size = page_list->max_page_list_len * sizeof(u64);
 
-        dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list,
+        dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
                           mfrpl->map);
         kfree(mfrpl->ibfrpl.page_list);
         kfree(mfrpl);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index bbbcf389272c..b8bb6ad6350c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -162,7 +162,7 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
         int wq_size;
 
         /* Sanity check RQ size before proceeding */
-        if (cap->max_recv_wr > dev->mdev.caps.max_wqes)
+        if (cap->max_recv_wr > dev->mdev->caps.max_wqes)
                 return -EINVAL;
 
         if (!has_rq) {
@@ -182,10 +182,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
                 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
                 qp->rq.wqe_cnt = wq_size / wqe_size;
-                if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
+                if (wqe_size > dev->mdev->caps.max_rq_desc_sz) {
                         mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
                                     wqe_size,
-                                    dev->mdev.caps.max_rq_desc_sz);
+                                    dev->mdev->caps.max_rq_desc_sz);
                         return -EINVAL;
                 }
                 qp->rq.wqe_shift = ilog2(wqe_size);
@@ -277,9 +277,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
         if (wqe_size < 0)
                 return wqe_size;
 
-        if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
+        if (wqe_size > dev->mdev->caps.max_sq_desc_sz) {
                 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
-                            wqe_size, dev->mdev.caps.max_sq_desc_sz);
+                            wqe_size, dev->mdev->caps.max_sq_desc_sz);
                 return -EINVAL;
         }
 
@@ -292,9 +292,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
292 292
293 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); 293 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
294 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; 294 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
295 if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) { 295 if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
296 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n", 296 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
297 qp->sq.wqe_cnt, dev->mdev.caps.max_wqes); 297 qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
298 return -ENOMEM; 298 return -ENOMEM;
299 } 299 }
300 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 300 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -311,9 +311,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
311{ 311{
312 int desc_sz = 1 << qp->sq.wqe_shift; 312 int desc_sz = 1 << qp->sq.wqe_shift;
313 313
314 if (desc_sz > dev->mdev.caps.max_sq_desc_sz) { 314 if (desc_sz > dev->mdev->caps.max_sq_desc_sz) {
315 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", 315 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
316 desc_sz, dev->mdev.caps.max_sq_desc_sz); 316 desc_sz, dev->mdev->caps.max_sq_desc_sz);
317 return -EINVAL; 317 return -EINVAL;
318 } 318 }
319 319
@@ -325,9 +325,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
325 325
326 qp->sq.wqe_cnt = ucmd->sq_wqe_count; 326 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
327 327
328 if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) { 328 if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
329 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", 329 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
330 qp->sq.wqe_cnt, dev->mdev.caps.max_wqes); 330 qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
331 return -EINVAL; 331 return -EINVAL;
332 } 332 }
333 333
@@ -674,7 +674,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
674 int uuarn; 674 int uuarn;
675 int err; 675 int err;
676 676
677 uuari = &dev->mdev.priv.uuari; 677 uuari = &dev->mdev->priv.uuari;
678 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)) 678 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
679 return -EINVAL; 679 return -EINVAL;
680 680
@@ -700,7 +700,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
700 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; 700 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
701 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); 701 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
702 702
703 err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf); 703 err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
704 if (err) { 704 if (err) {
705 mlx5_ib_dbg(dev, "err %d\n", err); 705 mlx5_ib_dbg(dev, "err %d\n", err);
706 goto err_uuar; 706 goto err_uuar;
@@ -722,7 +722,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
722 722
723 mlx5_fill_page_array(&qp->buf, (*in)->pas); 723 mlx5_fill_page_array(&qp->buf, (*in)->pas);
724 724
725 err = mlx5_db_alloc(&dev->mdev, &qp->db); 725 err = mlx5_db_alloc(dev->mdev, &qp->db);
726 if (err) { 726 if (err) {
727 mlx5_ib_dbg(dev, "err %d\n", err); 727 mlx5_ib_dbg(dev, "err %d\n", err);
728 goto err_free; 728 goto err_free;
@@ -747,7 +747,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
747 return 0; 747 return 0;
748 748
749err_wrid: 749err_wrid:
750 mlx5_db_free(&dev->mdev, &qp->db); 750 mlx5_db_free(dev->mdev, &qp->db);
751 kfree(qp->sq.wqe_head); 751 kfree(qp->sq.wqe_head);
752 kfree(qp->sq.w_list); 752 kfree(qp->sq.w_list);
753 kfree(qp->sq.wrid); 753 kfree(qp->sq.wrid);
@@ -758,23 +758,23 @@ err_free:
758 mlx5_vfree(*in); 758 mlx5_vfree(*in);
759 759
760err_buf: 760err_buf:
761 mlx5_buf_free(&dev->mdev, &qp->buf); 761 mlx5_buf_free(dev->mdev, &qp->buf);
762 762
763err_uuar: 763err_uuar:
764 free_uuar(&dev->mdev.priv.uuari, uuarn); 764 free_uuar(&dev->mdev->priv.uuari, uuarn);
765 return err; 765 return err;
766} 766}
767 767
768static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) 768static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
769{ 769{
770 mlx5_db_free(&dev->mdev, &qp->db); 770 mlx5_db_free(dev->mdev, &qp->db);
771 kfree(qp->sq.wqe_head); 771 kfree(qp->sq.wqe_head);
772 kfree(qp->sq.w_list); 772 kfree(qp->sq.w_list);
773 kfree(qp->sq.wrid); 773 kfree(qp->sq.wrid);
774 kfree(qp->sq.wr_data); 774 kfree(qp->sq.wr_data);
775 kfree(qp->rq.wrid); 775 kfree(qp->rq.wrid);
776 mlx5_buf_free(&dev->mdev, &qp->buf); 776 mlx5_buf_free(dev->mdev, &qp->buf);
777 free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn); 777 free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
778} 778}
779 779
780static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) 780static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
@@ -812,7 +812,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
812 spin_lock_init(&qp->rq.lock); 812 spin_lock_init(&qp->rq.lock);
813 813
814 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { 814 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
815 if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) { 815 if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
816 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n"); 816 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
817 return -EINVAL; 817 return -EINVAL;
818 } else { 818 } else {
@@ -851,9 +851,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
851 mlx5_ib_dbg(dev, "invalid rq params\n"); 851 mlx5_ib_dbg(dev, "invalid rq params\n");
852 return -EINVAL; 852 return -EINVAL;
853 } 853 }
854 if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) { 854 if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) {
855 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n", 855 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
856 ucmd.sq_wqe_count, dev->mdev.caps.max_wqes); 856 ucmd.sq_wqe_count, dev->mdev->caps.max_wqes);
857 return -EINVAL; 857 return -EINVAL;
858 } 858 }
859 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); 859 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -957,7 +957,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
957 957
958 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); 958 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
959 959
960 err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen); 960 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
961 if (err) { 961 if (err) {
962 mlx5_ib_dbg(dev, "create qp failed\n"); 962 mlx5_ib_dbg(dev, "create qp failed\n");
963 goto err_create; 963 goto err_create;
@@ -1081,7 +1081,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1081 if (!in) 1081 if (!in)
1082 return; 1082 return;
1083 if (qp->state != IB_QPS_RESET) 1083 if (qp->state != IB_QPS_RESET)
1084 if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state), 1084 if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
1085 MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp)) 1085 MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
1086 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n", 1086 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
1087 qp->mqp.qpn); 1087 qp->mqp.qpn);
@@ -1097,7 +1097,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1097 mlx5_ib_unlock_cqs(send_cq, recv_cq); 1097 mlx5_ib_unlock_cqs(send_cq, recv_cq);
1098 } 1098 }
1099 1099
1100 err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp); 1100 err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
1101 if (err) 1101 if (err)
1102 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); 1102 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
1103 kfree(in); 1103 kfree(in);
@@ -1165,7 +1165,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1165 switch (init_attr->qp_type) { 1165 switch (init_attr->qp_type) {
1166 case IB_QPT_XRC_TGT: 1166 case IB_QPT_XRC_TGT:
1167 case IB_QPT_XRC_INI: 1167 case IB_QPT_XRC_INI:
1168 if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) { 1168 if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
1169 mlx5_ib_dbg(dev, "XRC not supported\n"); 1169 mlx5_ib_dbg(dev, "XRC not supported\n");
1170 return ERR_PTR(-ENOSYS); 1170 return ERR_PTR(-ENOSYS);
1171 } 1171 }
@@ -1279,7 +1279,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1279 } else { 1279 } else {
1280 while (rate != IB_RATE_2_5_GBPS && 1280 while (rate != IB_RATE_2_5_GBPS &&
1281 !(1 << (rate + MLX5_STAT_RATE_OFFSET) & 1281 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
1282 dev->mdev.caps.stat_rate_support)) 1282 dev->mdev->caps.stat_rate_support))
1283 --rate; 1283 --rate;
1284 } 1284 }
1285 1285
@@ -1318,9 +1318,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1318 path->port = port; 1318 path->port = port;
1319 1319
1320 if (ah->ah_flags & IB_AH_GRH) { 1320 if (ah->ah_flags & IB_AH_GRH) {
1321 if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) { 1321 if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) {
1322 pr_err("sgid_index (%u) too large. max is %d\n", 1322 pr_err("sgid_index (%u) too large. max is %d\n",
1323 ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len); 1323 ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len);
1324 return -EINVAL; 1324 return -EINVAL;
1325 } 1325 }
1326 1326
@@ -1539,7 +1539,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1539 err = -EINVAL; 1539 err = -EINVAL;
1540 goto out; 1540 goto out;
1541 } 1541 }
1542 context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg; 1542 context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg;
1543 } 1543 }
1544 1544
1545 if (attr_mask & IB_QP_DEST_QPN) 1545 if (attr_mask & IB_QP_DEST_QPN)
@@ -1637,7 +1637,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1637 optpar = ib_mask_to_mlx5_opt(attr_mask); 1637 optpar = ib_mask_to_mlx5_opt(attr_mask);
1638 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st]; 1638 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1639 in->optparam = cpu_to_be32(optpar); 1639 in->optparam = cpu_to_be32(optpar);
1640 err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state), 1640 err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
1641 to_mlx5_state(new_state), in, sqd_event, 1641 to_mlx5_state(new_state), in, sqd_event,
1642 &qp->mqp); 1642 &qp->mqp);
1643 if (err) 1643 if (err)
@@ -1699,21 +1699,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1699 goto out; 1699 goto out;
1700 1700
1701 if ((attr_mask & IB_QP_PORT) && 1701 if ((attr_mask & IB_QP_PORT) &&
1702 (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports)) 1702 (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports))
1703 goto out; 1703 goto out;
1704 1704
1705 if (attr_mask & IB_QP_PKEY_INDEX) { 1705 if (attr_mask & IB_QP_PKEY_INDEX) {
1706 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; 1706 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1707 if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len) 1707 if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len)
1708 goto out; 1708 goto out;
1709 } 1709 }
1710 1710
1711 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 1711 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1712 attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp) 1712 attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp)
1713 goto out; 1713 goto out;
1714 1714
1715 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 1715 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1716 attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp) 1716 attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp)
1717 goto out; 1717 goto out;
1718 1718
1719 if (cur_state == new_state && cur_state == IB_QPS_RESET) { 1719 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -2479,7 +2479,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2479{ 2479{
2480 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ 2480 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
2481 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2481 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2482 struct mlx5_core_dev *mdev = &dev->mdev; 2482 struct mlx5_core_dev *mdev = dev->mdev;
2483 struct mlx5_ib_qp *qp = to_mqp(ibqp); 2483 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2484 struct mlx5_ib_mr *mr; 2484 struct mlx5_ib_mr *mr;
2485 struct mlx5_wqe_data_seg *dpseg; 2485 struct mlx5_wqe_data_seg *dpseg;
@@ -2888,7 +2888,7 @@ static int to_ib_qp_access_flags(int mlx5_flags)
2888static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr, 2888static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2889 struct mlx5_qp_path *path) 2889 struct mlx5_qp_path *path)
2890{ 2890{
2891 struct mlx5_core_dev *dev = &ibdev->mdev; 2891 struct mlx5_core_dev *dev = ibdev->mdev;
2892 2892
2893 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr)); 2893 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
2894 ib_ah_attr->port_num = path->port; 2894 ib_ah_attr->port_num = path->port;
@@ -2931,7 +2931,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
2931 goto out; 2931 goto out;
2932 } 2932 }
2933 context = &outb->ctx; 2933 context = &outb->ctx;
2934 err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb)); 2934 err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
2935 if (err) 2935 if (err)
2936 goto out_free; 2936 goto out_free;
2937 2937
@@ -3014,14 +3014,14 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3014 struct mlx5_ib_xrcd *xrcd; 3014 struct mlx5_ib_xrcd *xrcd;
3015 int err; 3015 int err;
3016 3016
3017 if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) 3017 if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC))
3018 return ERR_PTR(-ENOSYS); 3018 return ERR_PTR(-ENOSYS);
3019 3019
3020 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); 3020 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
3021 if (!xrcd) 3021 if (!xrcd)
3022 return ERR_PTR(-ENOMEM); 3022 return ERR_PTR(-ENOMEM);
3023 3023
3024 err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn); 3024 err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
3025 if (err) { 3025 if (err) {
3026 kfree(xrcd); 3026 kfree(xrcd);
3027 return ERR_PTR(-ENOMEM); 3027 return ERR_PTR(-ENOMEM);
@@ -3036,7 +3036,7 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3036 u32 xrcdn = to_mxrcd(xrcd)->xrcdn; 3036 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
3037 int err; 3037 int err;
3038 3038
3039 err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn); 3039 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
3040 if (err) { 3040 if (err) {
3041 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn); 3041 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
3042 return err; 3042 return err;
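The qp.c changes repeat the same rewrite at every capability check: device limits now sit one dereference away behind dev->mdev->caps. A hypothetical helper (not in the patch) showing the post-patch access path:

	/* Hypothetical helper: validate a requested RQ depth against the
	 * limit reported by the shared core device.
	 */
	static bool mlx5_ib_rq_depth_ok(struct mlx5_ib_dev *dev, u32 max_recv_wr)
	{
		return max_recv_wr <= dev->mdev->caps.max_wqes;
	}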
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 384af6dec5eb..70bd131ba646 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -159,7 +159,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
159 int page_shift; 159 int page_shift;
160 int npages; 160 int npages;
161 161
162 err = mlx5_db_alloc(&dev->mdev, &srq->db); 162 err = mlx5_db_alloc(dev->mdev, &srq->db);
163 if (err) { 163 if (err) {
164 mlx5_ib_warn(dev, "alloc dbell rec failed\n"); 164 mlx5_ib_warn(dev, "alloc dbell rec failed\n");
165 return err; 165 return err;
@@ -167,7 +167,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
167 167
168 *srq->db.db = 0; 168 *srq->db.db = 0;
169 169
170 if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) { 170 if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
171 mlx5_ib_dbg(dev, "buf alloc failed\n"); 171 mlx5_ib_dbg(dev, "buf alloc failed\n");
172 err = -ENOMEM; 172 err = -ENOMEM;
173 goto err_db; 173 goto err_db;
@@ -212,10 +212,10 @@ err_in:
212 mlx5_vfree(*in); 212 mlx5_vfree(*in);
213 213
214err_buf: 214err_buf:
215 mlx5_buf_free(&dev->mdev, &srq->buf); 215 mlx5_buf_free(dev->mdev, &srq->buf);
216 216
217err_db: 217err_db:
218 mlx5_db_free(&dev->mdev, &srq->db); 218 mlx5_db_free(dev->mdev, &srq->db);
219 return err; 219 return err;
220} 220}
221 221
@@ -229,8 +229,8 @@ static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
229static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq) 229static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
230{ 230{
231 kfree(srq->wrid); 231 kfree(srq->wrid);
232 mlx5_buf_free(&dev->mdev, &srq->buf); 232 mlx5_buf_free(dev->mdev, &srq->buf);
233 mlx5_db_free(&dev->mdev, &srq->db); 233 mlx5_db_free(dev->mdev, &srq->db);
234} 234}
235 235
236struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, 236struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
@@ -248,10 +248,10 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
248 u32 flgs, xrcdn; 248 u32 flgs, xrcdn;
249 249
250 /* Sanity check SRQ size before proceeding */ 250 /* Sanity check SRQ size before proceeding */
251 if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) { 251 if (init_attr->attr.max_wr >= dev->mdev->caps.max_srq_wqes) {
252 mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", 252 mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
253 init_attr->attr.max_wr, 253 init_attr->attr.max_wr,
254 dev->mdev.caps.max_srq_wqes); 254 dev->mdev->caps.max_srq_wqes);
255 return ERR_PTR(-EINVAL); 255 return ERR_PTR(-EINVAL);
256 } 256 }
257 257
@@ -303,7 +303,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
303 303
304 in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn); 304 in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
305 in->ctx.db_record = cpu_to_be64(srq->db.dma); 305 in->ctx.db_record = cpu_to_be64(srq->db.dma);
306 err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen); 306 err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
307 mlx5_vfree(in); 307 mlx5_vfree(in);
308 if (err) { 308 if (err) {
309 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); 309 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
@@ -327,7 +327,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
327 return &srq->ibsrq; 327 return &srq->ibsrq;
328 328
329err_core: 329err_core:
330 mlx5_core_destroy_srq(&dev->mdev, &srq->msrq); 330 mlx5_core_destroy_srq(dev->mdev, &srq->msrq);
331 331
332err_usr_kern_srq: 332err_usr_kern_srq:
333 if (pd->uobject) 333 if (pd->uobject)
@@ -357,7 +357,7 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
357 return -EINVAL; 357 return -EINVAL;
358 358
359 mutex_lock(&srq->mutex); 359 mutex_lock(&srq->mutex);
360 ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1); 360 ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
361 mutex_unlock(&srq->mutex); 361 mutex_unlock(&srq->mutex);
362 362
363 if (ret) 363 if (ret)
@@ -378,7 +378,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
378 if (!out) 378 if (!out)
379 return -ENOMEM; 379 return -ENOMEM;
380 380
381 ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out); 381 ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
382 if (ret) 382 if (ret)
383 goto out_box; 383 goto out_box;
384 384
@@ -396,7 +396,7 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq)
396 struct mlx5_ib_dev *dev = to_mdev(srq->device); 396 struct mlx5_ib_dev *dev = to_mdev(srq->device);
397 struct mlx5_ib_srq *msrq = to_msrq(srq); 397 struct mlx5_ib_srq *msrq = to_msrq(srq);
398 398
399 mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq); 399 mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);
400 400
401 if (srq->uobject) { 401 if (srq->uobject) {
402 mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); 402 mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
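srq.c follows suit. As a reading aid, the kernel-side SRQ creation path after the change, condensed into one sketch (the calls are taken from the hunks above; the ordering comment is mine):

	/* SRQ creation, kernel case, post-patch:
	 *
	 *	mlx5_db_alloc(dev->mdev, &srq->db);		doorbell record
	 *	mlx5_buf_alloc(dev->mdev, buf_size, ...);	SRQ buffer
	 *	mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
	 *
	 * Errors unwind in reverse (buf_free, then db_free), each taking the
	 * same dev->mdev pointer.
	 */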
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ee24f132e319..4b7f9da4bf11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -58,7 +58,100 @@ int mlx5_core_debug_mask;
58module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644); 58module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
59MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0"); 59MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
60 60
61#define MLX5_DEFAULT_PROF 2
62static int prof_sel = MLX5_DEFAULT_PROF;
63module_param_named(prof_sel, prof_sel, int, 0444);
64MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
65
61struct workqueue_struct *mlx5_core_wq; 66struct workqueue_struct *mlx5_core_wq;
67static LIST_HEAD(intf_list);
68static LIST_HEAD(dev_list);
69static DEFINE_MUTEX(intf_mutex);
70
71struct mlx5_device_context {
72 struct list_head list;
73 struct mlx5_interface *intf;
74 void *context;
75};
76
77static struct mlx5_profile profile[] = {
78 [0] = {
79 .mask = 0,
80 },
81 [1] = {
82 .mask = MLX5_PROF_MASK_QP_SIZE,
83 .log_max_qp = 12,
84 },
85 [2] = {
86 .mask = MLX5_PROF_MASK_QP_SIZE |
87 MLX5_PROF_MASK_MR_CACHE,
88 .log_max_qp = 17,
89 .mr_cache[0] = {
90 .size = 500,
91 .limit = 250
92 },
93 .mr_cache[1] = {
94 .size = 500,
95 .limit = 250
96 },
97 .mr_cache[2] = {
98 .size = 500,
99 .limit = 250
100 },
101 .mr_cache[3] = {
102 .size = 500,
103 .limit = 250
104 },
105 .mr_cache[4] = {
106 .size = 500,
107 .limit = 250
108 },
109 .mr_cache[5] = {
110 .size = 500,
111 .limit = 250
112 },
113 .mr_cache[6] = {
114 .size = 500,
115 .limit = 250
116 },
117 .mr_cache[7] = {
118 .size = 500,
119 .limit = 250
120 },
121 .mr_cache[8] = {
122 .size = 500,
123 .limit = 250
124 },
125 .mr_cache[9] = {
126 .size = 500,
127 .limit = 250
128 },
129 .mr_cache[10] = {
130 .size = 500,
131 .limit = 250
132 },
133 .mr_cache[11] = {
134 .size = 500,
135 .limit = 250
136 },
137 .mr_cache[12] = {
138 .size = 64,
139 .limit = 32
140 },
141 .mr_cache[13] = {
142 .size = 32,
143 .limit = 16
144 },
145 .mr_cache[14] = {
146 .size = 16,
147 .limit = 8
148 },
149 .mr_cache[15] = {
150 .size = 8,
151 .limit = 4
152 },
153 },
154};
62 155
63static int set_dma_caps(struct pci_dev *pdev) 156static int set_dma_caps(struct pci_dev *pdev)
64{ 157{
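The new prof_sel parameter selects one of three static resource profiles; the default (2) caps log_max_qp at 17 and pre-sizes all 16 MR-cache buckets. A consumer is expected to honor a profile field only when its mask bit is set; a hedged sketch of that pattern (the helper name is mine):

	/* Sketch: trust a profile field only when its mask bit says so. */
	static int profile_log_max_qp(const struct mlx5_profile *prof, int hw_default)
	{
		if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
			return prof->log_max_qp;	/* 17 for the default profile */
		return hw_default;
	}

A smaller footprint can then be requested at load time, e.g. "modprobe mlx5_core prof_sel=1" (assuming the usual module name).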
@@ -299,7 +392,7 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
299 return 0; 392 return 0;
300} 393}
301 394
302int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) 395static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
303{ 396{
304 struct mlx5_priv *priv = &dev->priv; 397 struct mlx5_priv *priv = &dev->priv;
305 int err; 398 int err;
@@ -489,7 +582,7 @@ err_dbg:
489} 582}
490EXPORT_SYMBOL(mlx5_dev_init); 583EXPORT_SYMBOL(mlx5_dev_init);
491 584
492void mlx5_dev_cleanup(struct mlx5_core_dev *dev) 585static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
493{ 586{
494 struct mlx5_priv *priv = &dev->priv; 587 struct mlx5_priv *priv = &dev->priv;
495 588
@@ -516,7 +609,190 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
516 pci_disable_device(dev->pdev); 609 pci_disable_device(dev->pdev);
517 debugfs_remove(priv->dbg_root); 610 debugfs_remove(priv->dbg_root);
518} 611}
519EXPORT_SYMBOL(mlx5_dev_cleanup); 612
613static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
614{
615 struct mlx5_device_context *dev_ctx;
616 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
617
618 dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
619 if (!dev_ctx) {
620 pr_warn("mlx5_add_device: alloc context failed\n");
621 return;
622 }
623
624 dev_ctx->intf = intf;
625 dev_ctx->context = intf->add(dev);
626
627 if (dev_ctx->context) {
628 spin_lock_irq(&priv->ctx_lock);
629 list_add_tail(&dev_ctx->list, &priv->ctx_list);
630 spin_unlock_irq(&priv->ctx_lock);
631 } else {
632 kfree(dev_ctx);
633 }
634}
635
636static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
637{
638 struct mlx5_device_context *dev_ctx;
639 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
640
641 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
642 if (dev_ctx->intf == intf) {
643 spin_lock_irq(&priv->ctx_lock);
644 list_del(&dev_ctx->list);
645 spin_unlock_irq(&priv->ctx_lock);
646
647 intf->remove(dev, dev_ctx->context);
648 kfree(dev_ctx);
649 return;
650 }
651}
652static int mlx5_register_device(struct mlx5_core_dev *dev)
653{
654 struct mlx5_priv *priv = &dev->priv;
655 struct mlx5_interface *intf;
656
657 mutex_lock(&intf_mutex);
658 list_add_tail(&priv->dev_list, &dev_list);
659 list_for_each_entry(intf, &intf_list, list)
660 mlx5_add_device(intf, priv);
661 mutex_unlock(&intf_mutex);
662
663 return 0;
664}
665static void mlx5_unregister_device(struct mlx5_core_dev *dev)
666{
667 struct mlx5_priv *priv = &dev->priv;
668 struct mlx5_interface *intf;
669
670 mutex_lock(&intf_mutex);
671 list_for_each_entry(intf, &intf_list, list)
672 mlx5_remove_device(intf, priv);
673 list_del(&priv->dev_list);
674 mutex_unlock(&intf_mutex);
675}
676
677int mlx5_register_interface(struct mlx5_interface *intf)
678{
679 struct mlx5_priv *priv;
680
681 if (!intf->add || !intf->remove)
682 return -EINVAL;
683
684 mutex_lock(&intf_mutex);
685 list_add_tail(&intf->list, &intf_list);
686 list_for_each_entry(priv, &dev_list, dev_list)
687 mlx5_add_device(intf, priv);
688 mutex_unlock(&intf_mutex);
689
690 return 0;
691}
692EXPORT_SYMBOL(mlx5_register_interface);
693
694void mlx5_unregister_interface(struct mlx5_interface *intf)
695{
696 struct mlx5_priv *priv;
697
698 mutex_lock(&intf_mutex);
699 list_for_each_entry(priv, &dev_list, dev_list)
700 mlx5_remove_device(intf, priv);
701 list_del(&intf->list);
702 mutex_unlock(&intf_mutex);
703}
704EXPORT_SYMBOL(mlx5_unregister_interface);
705
706static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
707 void *data)
708{
709 struct mlx5_priv *priv = &dev->priv;
710 struct mlx5_device_context *dev_ctx;
711 unsigned long flags;
712
713 spin_lock_irqsave(&priv->ctx_lock, flags);
714
715 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
716 if (dev_ctx->intf->event)
717 dev_ctx->intf->event(dev, dev_ctx->context, event, data);
718
719 spin_unlock_irqrestore(&priv->ctx_lock, flags);
720}
721
722struct mlx5_core_event_handler {
723 void (*event)(struct mlx5_core_dev *dev,
724 enum mlx5_dev_event event,
725 void *data);
726};
727
728static int init_one(struct pci_dev *pdev,
729 const struct pci_device_id *id)
730{
731 struct mlx5_core_dev *dev;
732 struct mlx5_priv *priv;
733 int err;
734
735 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
736 if (!dev) {
737 dev_err(&pdev->dev, "kzalloc failed\n");
738 return -ENOMEM;
739 }
740 priv = &dev->priv;
741
742 pci_set_drvdata(pdev, dev);
743
744 if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
745 pr_warn("selected profile out of range, selecting default (%d)\n",
746 MLX5_DEFAULT_PROF);
747 prof_sel = MLX5_DEFAULT_PROF;
748 }
749 dev->profile = &profile[prof_sel];
750 dev->event = mlx5_core_event;
751
752 err = mlx5_dev_init(dev, pdev);
753 if (err) {
754 dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
755 goto out;
756 }
757
758 INIT_LIST_HEAD(&priv->ctx_list);
759 spin_lock_init(&priv->ctx_lock);
760 err = mlx5_register_device(dev);
761 if (err) {
762 dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
763 goto out_init;
764 }
765
766 return 0;
767
768out_init:
769 mlx5_dev_cleanup(dev);
770out:
771 kfree(dev);
772 return err;
773}
774static void remove_one(struct pci_dev *pdev)
775{
776 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
777
778 mlx5_unregister_device(dev);
779 mlx5_dev_cleanup(dev);
780 kfree(dev);
781}
782
783static const struct pci_device_id mlx5_core_pci_table[] = {
784 { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
785 { 0, }
786};
787
788MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
789
790static struct pci_driver mlx5_core_driver = {
791 .name = DRIVER_NAME,
792 .id_table = mlx5_core_pci_table,
793 .probe = init_one,
794 .remove = remove_one
795};
520 796
521static int __init init(void) 797static int __init init(void)
522{ 798{
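This block is item 3 of the commit message made concrete: mlx5_core owns the PCI device, keeps global dev_list/intf_list under intf_mutex, and cross-joins them so every interface is offered every device. A sketch of a minimal client with invented "dummy" names (the real client added by this patch is mlx5_ib, in its own main.c):

	static void *dummy_add(struct mlx5_core_dev *mdev)
	{
		/* Per-device state; returning NULL means "skip this device"
		 * (mlx5_add_device() then frees the context slot).
		 */
		return kzalloc(16, GFP_KERNEL);
	}

	static void dummy_remove(struct mlx5_core_dev *mdev, void *context)
	{
		kfree(context);
	}

	static void dummy_event(struct mlx5_core_dev *mdev, void *context,
				enum mlx5_dev_event event, void *data)
	{
		/* Runs under priv->ctx_lock with IRQs saved: must not sleep. */
	}

	static struct mlx5_interface dummy_intf = {
		.add	= dummy_add,	/* mandatory */
		.remove	= dummy_remove,	/* mandatory */
		.event	= dummy_event,	/* optional */
	};

	static int __init dummy_init(void)
	{
		return mlx5_register_interface(&dummy_intf);
	}

	static void __exit dummy_exit(void)
	{
		mlx5_unregister_interface(&dummy_intf);
	}

	module_init(dummy_init);
	module_exit(dummy_exit);

mlx5_register_interface() rejects an interface lacking add or remove, and immediately offers it to every already-probed device, so a client loaded after mlx5_core still sees existing HCAs.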
@@ -530,8 +806,15 @@ static int __init init(void)
530 } 806 }
531 mlx5_health_init(); 807 mlx5_health_init();
532 808
809 err = pci_register_driver(&mlx5_core_driver);
810 if (err)
811 goto err_health;
812
533 return 0; 813 return 0;
534 814
815err_health:
816 mlx5_health_cleanup();
817 destroy_workqueue(mlx5_core_wq);
535err_debug: 818err_debug:
536 mlx5_unregister_debugfs(); 819 mlx5_unregister_debugfs();
537 return err; 820 return err;
@@ -539,6 +822,7 @@ err_debug:
539 822
540static void __exit cleanup(void) 823static void __exit cleanup(void)
541{ 824{
825 pci_unregister_driver(&mlx5_core_driver);
542 mlx5_health_cleanup(); 826 mlx5_health_cleanup();
543 destroy_workqueue(mlx5_core_wq); 827 destroy_workqueue(mlx5_core_wq);
544 mlx5_unregister_debugfs(); 828 mlx5_unregister_debugfs();
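With the pci_driver moved here, probe composes the pieces above. The call chain, reconstructed from these hunks as a reading aid:

	/*
	 * pci core
	 *   -> init_one()			alloc mlx5_core_dev, validate prof_sel,
	 *					set dev->event = mlx5_core_event
	 *   -> mlx5_dev_init()			the former exported entry point, now static
	 *   -> mlx5_register_device()		link into dev_list under intf_mutex
	 *        -> mlx5_add_device()		intf->add(dev) for each registered interface
	 *
	 * remove_one() unwinds in reverse: mlx5_unregister_device(), then
	 * mlx5_dev_cleanup(), then kfree(dev).
	 */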
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2bce4aad2570..d0cb5984a45f 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -543,6 +543,10 @@ struct mlx5_priv {
543 /* protect mkey key part */ 543 /* protect mkey key part */
544 spinlock_t mkey_lock; 544 spinlock_t mkey_lock;
545 u8 mkey_key; 545 u8 mkey_key;
546
547 struct list_head dev_list;
548 struct list_head ctx_list;
549 spinlock_t ctx_lock;
546}; 550};
547 551
548struct mlx5_core_dev { 552struct mlx5_core_dev {
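The three new fields are the state behind the machinery in main.c: dev_list links the device into the global device list (walked under intf_mutex), while ctx_list holds one mlx5_device_context per attached interface. Note the choice of a spinlock for ctx_lock rather than a mutex: mlx5_core_event() fans events out under spin_lock_irqsave(), so the list must be traversable from atomic context.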
@@ -686,8 +690,6 @@ static inline u32 mlx5_base_mkey(const u32 key)
686 return key & 0xffffff00u; 690 return key & 0xffffff00u;
687} 691}
688 692
689int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
690void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
691int mlx5_cmd_init(struct mlx5_core_dev *dev); 693int mlx5_cmd_init(struct mlx5_core_dev *dev);
692void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); 694void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
693void mlx5_cmd_use_events(struct mlx5_core_dev *dev); 695void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
@@ -811,6 +813,17 @@ enum {
811 MAX_MR_CACHE_ENTRIES = 16, 813 MAX_MR_CACHE_ENTRIES = 16,
812}; 814};
813 815
816struct mlx5_interface {
817 void * (*add)(struct mlx5_core_dev *dev);
818 void (*remove)(struct mlx5_core_dev *dev, void *context);
819 void (*event)(struct mlx5_core_dev *dev, void *context,
820 enum mlx5_dev_event event, void *data);
821 struct list_head list;
822};
823
824int mlx5_register_interface(struct mlx5_interface *intf);
825void mlx5_unregister_interface(struct mlx5_interface *intf);
826
814struct mlx5_profile { 827struct mlx5_profile {
815 u64 mask; 828 u64 mask;
816 u32 log_max_qp; 829 u32 log_max_qp;