Diffstat (limited to 'drivers/infiniband/hw/mlx4')

-rw-r--r--  drivers/infiniband/hw/mlx4/ah.c          |   1
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c  |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c          |  57
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c         |   3
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c        | 246
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h     |  26
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c          |   6
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c          |  90
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c         |   8
-rw-r--r--  drivers/infiniband/hw/mlx4/sysfs.c       |   6

10 files changed, 395 insertions, 50 deletions
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 2d8c3397774f..f50a546224ad 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <linux/inet.h>
 #include <linux/string.h>
+#include <linux/mlx4/driver.h>
 
 #include "mlx4_ib.h"
 
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 0eb141c41416..a31e031afd87 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
            continue;
 
        slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
-       if (slave_id >= dev->dev->num_vfs + 1)
+       if (slave_id >= dev->dev->persist->num_vfs + 1)
            return;
        tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
        form_cache_ag = get_cached_alias_guid(dev, port_num,
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a3b70f6c4035..543ecdd8667b 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -188,6 +188,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
    spin_lock_init(&cq->lock);
    cq->resize_buf = NULL;
    cq->resize_umem = NULL;
+   INIT_LIST_HEAD(&cq->send_qp_list);
+   INIT_LIST_HEAD(&cq->recv_qp_list);
 
    if (context) {
        struct mlx4_ib_create_cq ucmd;
@@ -594,6 +596,55 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
    return 0;
 }
 
+static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
+                              struct ib_wc *wc, int *npolled, int is_send)
+{
+   struct mlx4_ib_wq *wq;
+   unsigned cur;
+   int i;
+
+   wq = is_send ? &qp->sq : &qp->rq;
+   cur = wq->head - wq->tail;
+
+   if (cur == 0)
+       return;
+
+   for (i = 0; i < cur && *npolled < num_entries; i++) {
+       wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+       wc->status = IB_WC_WR_FLUSH_ERR;
+       wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
+       wq->tail++;
+       (*npolled)++;
+       wc->qp = &qp->ibqp;
+       wc++;
+   }
+}
+
+static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
+                                struct ib_wc *wc, int *npolled)
+{
+   struct mlx4_ib_qp *qp;
+
+   *npolled = 0;
+   /* Find uncompleted WQEs belonging to that cq and retrun
+    * simulated FLUSH_ERR completions
+    */
+   list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
+       mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1);
+       if (*npolled >= num_entries)
+           goto out;
+   }
+
+   list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
+       mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
+       if (*npolled >= num_entries)
+           goto out;
+   }
+
+out:
+   return;
+}
+
 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                struct mlx4_ib_qp **cur_qp,
                struct ib_wc *wc)
@@ -836,8 +887,13 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
    unsigned long flags;
    int npolled;
    int err = 0;
+   struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
 
    spin_lock_irqsave(&cq->lock, flags);
+   if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+       mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+       goto out;
+   }
 
    for (npolled = 0; npolled < num_entries; ++npolled) {
        err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
@@ -847,6 +903,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 
    mlx4_cq_set_ci(&cq->mcq);
 
+out:
    spin_unlock_irqrestore(&cq->lock, flags);
 
    if (err == 0 || err == -EAGAIN)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 82a7dd87089b..c7619716c31d 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1951,7 +1951,8 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
    ctx->ib_dev = &dev->ib_dev;
 
    for (i = 0;
-        i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1));
+        i < min(dev->dev->caps.sqp_demux,
+                (u16)(dev->dev->persist->num_vfs + 1));
         i++) {
        struct mlx4_active_ports actv_ports =
            mlx4_get_active_ports(dev->dev, i);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 9117b7a2d5f8..eb8e215f1613 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -198,7 +198,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
    props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
        0xffffff;
-   props->vendor_part_id = dev->dev->pdev->device;
+   props->vendor_part_id = dev->dev->persist->pdev->device;
    props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
    memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
 
@@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
    enum ib_mtu tmp;
    struct mlx4_cmd_mailbox *mailbox;
    int err = 0;
+   int is_bonded = mlx4_is_bonded(mdev->dev);
 
    mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
    if (IS_ERR(mailbox))
@@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
    props->state = IB_PORT_DOWN;
    props->phys_state = state_to_phys_state(props->state);
    props->active_mtu = IB_MTU_256;
+   if (is_bonded)
+       rtnl_lock(); /* required to get upper dev */
    spin_lock_bh(&iboe->lock);
    ndev = iboe->netdevs[port - 1];
+   if (ndev && is_bonded)
+       ndev = netdev_master_upper_dev_get(ndev);
    if (!ndev)
        goto out_unlock;
 
@@ -387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
    props->phys_state = state_to_phys_state(props->state);
 out_unlock:
    spin_unlock_bh(&iboe->lock);
+   if (is_bonded)
+       rtnl_unlock();
 out:
    mlx4_free_cmd_mailbox(mdev->dev, mailbox);
    return err;
@@ -844,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 
 struct mlx4_ib_steering {
    struct list_head list;
-   u64 reg_id;
+   struct mlx4_flow_reg_id reg_id;
    union ib_gid gid;
 };
 
@@ -1135,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                    struct ib_flow_attr *flow_attr,
                    int domain)
 {
-   int err = 0, i = 0;
+   int err = 0, i = 0, j = 0;
    struct mlx4_ib_flow *mflow;
    enum mlx4_net_trans_promisc_mode type[2];
+   struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
+   int is_bonded = mlx4_is_bonded(dev);
 
    memset(type, 0, sizeof(type));
 
@@ -1172,26 +1181,58 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 
    while (i < ARRAY_SIZE(type) && type[i]) {
        err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
-                                   &mflow->reg_id[i]);
+                                   &mflow->reg_id[i].id);
        if (err)
            goto err_create_flow;
        i++;
+       if (is_bonded) {
+           /* Application always sees one port so the mirror rule
+            * must be on port #2
+            */
+           flow_attr->port = 2;
+           err = __mlx4_ib_create_flow(qp, flow_attr,
+                                       domain, type[j],
+                                       &mflow->reg_id[j].mirror);
+           flow_attr->port = 1;
+           if (err)
+               goto err_create_flow;
+           j++;
+       }
+
    }
 
    if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
-       err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+       err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+                                      &mflow->reg_id[i].id);
        if (err)
            goto err_create_flow;
        i++;
+       if (is_bonded) {
+           flow_attr->port = 2;
+           err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+                                          &mflow->reg_id[j].mirror);
+           flow_attr->port = 1;
+           if (err)
+               goto err_create_flow;
+           j++;
+       }
+       /* function to create mirror rule */
    }
 
    return &mflow->ibflow;
 
 err_create_flow:
    while (i) {
-       (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+       (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+                                    mflow->reg_id[i].id);
        i--;
    }
+
+   while (j) {
+       (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+                                    mflow->reg_id[j].mirror);
+       j--;
+   }
 err_free:
    kfree(mflow);
    return ERR_PTR(err);
@@ -1204,10 +1245,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
    struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
    struct mlx4_ib_flow *mflow = to_mflow(flow_id);
 
-   while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
-       err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
+   while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
+       err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
        if (err)
            ret = err;
+       if (mflow->reg_id[i].mirror) {
+           err = __mlx4_ib_destroy_flow(mdev->dev,
+                                        mflow->reg_id[i].mirror);
+           if (err)
+               ret = err;
+       }
        i++;
    }
 
@@ -1219,11 +1266,12 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
    int err;
    struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+   struct mlx4_dev *dev = mdev->dev;
    struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-   u64 reg_id;
    struct mlx4_ib_steering *ib_steering = NULL;
    enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
        MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+   struct mlx4_flow_reg_id reg_id;
 
    if (mdev->dev->caps.steering_mode ==
        MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1235,10 +1283,21 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
    err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
                    !!(mqp->flags &
                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-                   prot, &reg_id);
+                   prot, &reg_id.id);
    if (err)
        goto err_malloc;
 
+   reg_id.mirror = 0;
+   if (mlx4_is_bonded(dev)) {
+       err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
+                       (mqp->port == 1) ? 2 : 1,
+                       !!(mqp->flags &
+                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+                       prot, &reg_id.mirror);
+       if (err)
+           goto err_add;
+   }
+
    err = add_gid_entry(ibqp, gid);
    if (err)
        goto err_add;
@@ -1254,7 +1313,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 
 err_add:
    mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-                 prot, reg_id);
+                 prot, reg_id.id);
+   if (reg_id.mirror)
+       mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+                     prot, reg_id.mirror);
 err_malloc:
    kfree(ib_steering);
 
@@ -1281,10 +1343,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
    int err;
    struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+   struct mlx4_dev *dev = mdev->dev;
    struct mlx4_ib_qp *mqp = to_mqp(ibqp);
    struct net_device *ndev;
    struct mlx4_ib_gid_entry *ge;
-   u64 reg_id = 0;
+   struct mlx4_flow_reg_id reg_id = {0, 0};
+
    enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
        MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
@@ -1309,10 +1373,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
    }
 
    err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-                   prot, reg_id);
+                   prot, reg_id.id);
    if (err)
        return err;
 
+   if (mlx4_is_bonded(dev)) {
+       err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+                       prot, reg_id.mirror);
+       if (err)
+           return err;
+   }
+
    mutex_lock(&mqp->mutex);
    ge = find_gid_entry(mqp, gid->raw);
    if (ge) {
@@ -1376,7 +1447,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
 {
    struct mlx4_ib_dev *dev =
        container_of(device, struct mlx4_ib_dev, ib_dev.dev);
-   return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
+   return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
 }
 
 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@@ -1440,6 +1511,7 @@ static void update_gids_task(struct work_struct *work)
    union ib_gid *gids;
    int err;
    struct mlx4_dev *dev = gw->dev->dev;
+   int is_bonded = mlx4_is_bonded(dev);
 
    if (!gw->dev->ib_active)
        return;
@@ -1459,7 +1531,10 @@ static void update_gids_task(struct work_struct *work)
    if (err)
        pr_warn("set port command failed\n");
    else
-       mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
+       if ((gw->port == 1) || !is_bonded)
+           mlx4_ib_dispatch_event(gw->dev,
+                          is_bonded ? 1 : gw->port,
+                          IB_EVENT_GID_CHANGE);
 
    mlx4_free_cmd_mailbox(dev, mailbox);
    kfree(gw);
@@ -1875,7 +1950,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
                 * don't want the bond IP based gids in the table since
                 * flows that select port by gid may get the down port.
                 */
-               if (port_state == IB_PORT_DOWN) {
+               if (port_state == IB_PORT_DOWN &&
+                   !mlx4_is_bonded(ibdev->dev)) {
                    reset_gid_table(ibdev, port);
                    mlx4_ib_set_default_gid(ibdev,
                                curr_netdev,
@@ -1938,7 +2014,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
    int i;
 
    if (mlx4_is_master(ibdev->dev)) {
-       for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
+       for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
+            ++slave) {
            for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
                for (i = 0;
                     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
@@ -1995,7 +2072,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
    mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
        for (j = 0; j < eq_per_port; j++) {
            snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
-                i, j, dev->pdev->bus->name);
+                i, j, dev->persist->pdev->bus->name);
            /* Set IRQ for specific name (per ring) */
            if (mlx4_assign_eq(dev, name, NULL,
                       &ibdev->eq_table[eq])) {
@@ -2046,6 +2123,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
    int err;
    struct mlx4_ib_iboe *iboe;
    int ib_num_ports = 0;
+   int num_req_counters;
 
    pr_info_once("%s", mlx4_ib_version);
 
@@ -2059,7 +2137,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
    ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
    if (!ibdev) {
-       dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
+       dev_err(&dev->persist->pdev->dev,
+           "Device struct alloc failed\n");
        return NULL;
    }
 
@@ -2078,15 +2157,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
    MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
 
    ibdev->dev = dev;
+   ibdev->bond_next_port = 0;
 
    strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
    ibdev->ib_dev.owner = THIS_MODULE;
    ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
    ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
    ibdev->num_ports = num_ports;
-   ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
+   ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
+                       1 : ibdev->num_ports;
    ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
-   ibdev->ib_dev.dma_device = &dev->pdev->dev;
+   ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
 
    if (dev->caps.userspace_caps)
        ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
@@ -2205,7 +2286,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
    if (init_node_data(ibdev))
        goto err_map;
 
-   for (i = 0; i < ibdev->num_ports; ++i) {
+   num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
+   for (i = 0; i < num_req_counters; ++i) {
        mutex_init(&ibdev->qp1_proxy_lock[i]);
        if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
                        IB_LINK_LAYER_ETHERNET) {
@@ -2216,12 +2298,18 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
            ibdev->counters[i] = -1;
        }
    }
+   if (mlx4_is_bonded(dev))
+       for (i = 1; i < ibdev->num_ports ; ++i)
+           ibdev->counters[i] = ibdev->counters[0];
+
 
    mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
        ib_num_ports++;
 
    spin_lock_init(&ibdev->sm_lock);
    mutex_init(&ibdev->cap_mask_mutex);
+   INIT_LIST_HEAD(&ibdev->qp_list);
+   spin_lock_init(&ibdev->reset_flow_resource_lock);
 
    if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
        ib_num_ports) {
@@ -2237,7 +2325,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        sizeof(long),
                        GFP_KERNEL);
        if (!ibdev->ib_uc_qpns_bitmap) {
-           dev_err(&dev->pdev->dev, "bit map alloc failed\n");
+           dev_err(&dev->persist->pdev->dev,
+               "bit map alloc failed\n");
            goto err_steer_qp_release;
        }
 
@@ -2535,6 +2624,99 @@ out:
    return;
 }
 
+static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
+{
+   struct mlx4_ib_qp *mqp;
+   unsigned long flags_qp;
+   unsigned long flags_cq;
+   struct mlx4_ib_cq *send_mcq, *recv_mcq;
+   struct list_head cq_notify_list;
+   struct mlx4_cq *mcq;
+   unsigned long flags;
+
+   pr_warn("mlx4_ib_handle_catas_error was started\n");
+   INIT_LIST_HEAD(&cq_notify_list);
+
+   /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
+   spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+
+   list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
+       spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+       if (mqp->sq.tail != mqp->sq.head) {
+           send_mcq = to_mcq(mqp->ibqp.send_cq);
+           spin_lock_irqsave(&send_mcq->lock, flags_cq);
+           if (send_mcq->mcq.comp &&
+               mqp->ibqp.send_cq->comp_handler) {
+               if (!send_mcq->mcq.reset_notify_added) {
+                   send_mcq->mcq.reset_notify_added = 1;
+                   list_add_tail(&send_mcq->mcq.reset_notify,
+                                 &cq_notify_list);
+               }
+           }
+           spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+       }
+       spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+       /* Now, handle the QP's receive queue */
+       spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+       /* no handling is needed for SRQ */
+       if (!mqp->ibqp.srq) {
+           if (mqp->rq.tail != mqp->rq.head) {
+               recv_mcq = to_mcq(mqp->ibqp.recv_cq);
+               spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+               if (recv_mcq->mcq.comp &&
+                   mqp->ibqp.recv_cq->comp_handler) {
+                   if (!recv_mcq->mcq.reset_notify_added) {
+                       recv_mcq->mcq.reset_notify_added = 1;
+                       list_add_tail(&recv_mcq->mcq.reset_notify,
+                                     &cq_notify_list);
+                   }
+               }
+               spin_unlock_irqrestore(&recv_mcq->lock,
+                              flags_cq);
+           }
+       }
+       spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+   }
+
+   list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
+       mcq->comp(mcq);
+   }
+   spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+   pr_warn("mlx4_ib_handle_catas_error ended\n");
+}
+
+static void handle_bonded_port_state_event(struct work_struct *work)
+{
+   struct ib_event_work *ew =
+       container_of(work, struct ib_event_work, work);
+   struct mlx4_ib_dev *ibdev = ew->ib_dev;
+   enum ib_port_state bonded_port_state = IB_PORT_NOP;
+   int i;
+   struct ib_event ibev;
+
+   kfree(ew);
+   spin_lock_bh(&ibdev->iboe.lock);
+   for (i = 0; i < MLX4_MAX_PORTS; ++i) {
+       struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+
+       enum ib_port_state curr_port_state =
+           (netif_running(curr_netdev) &&
+            netif_carrier_ok(curr_netdev)) ?
+           IB_PORT_ACTIVE : IB_PORT_DOWN;
+
+       bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
+           curr_port_state : IB_PORT_ACTIVE;
+   }
+   spin_unlock_bh(&ibdev->iboe.lock);
+
+   ibev.device = &ibdev->ib_dev;
+   ibev.element.port_num = 1;
+   ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
+       IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+   ib_dispatch_event(&ibev);
+}
+
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
              enum mlx4_dev_event event, unsigned long param)
 {
@@ -2544,6 +2726,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
    struct ib_event_work *ew;
    int p = 0;
 
+   if (mlx4_is_bonded(dev) &&
+       ((event == MLX4_DEV_EVENT_PORT_UP) ||
+        (event == MLX4_DEV_EVENT_PORT_DOWN))) {
+       ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+       if (!ew)
+           return;
+       INIT_WORK(&ew->work, handle_bonded_port_state_event);
+       ew->ib_dev = ibdev;
+       queue_work(wq, &ew->work);
+       return;
+   }
+
    if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
        eqe = (struct mlx4_eqe *)param;
    else
@@ -2570,6 +2764,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
    case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
        ibdev->ib_active = false;
        ibev.event = IB_EVENT_DEVICE_FATAL;
+       mlx4_ib_handle_catas_error(ibdev);
        break;
 
    case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
@@ -2604,7 +2799,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
    }
 
    ibev.device = ibdev_ptr;
-   ibev.element.port_num = (u8) p;
+   ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
 
    ib_dispatch_event(&ibev);
 }
@@ -2613,7 +2808,8 @@ static struct mlx4_interface mlx4_ib_interface = {
    .add = mlx4_ib_add,
    .remove = mlx4_ib_remove,
    .event = mlx4_ib_event,
-   .protocol = MLX4_PROT_IB_IPV6
+   .protocol = MLX4_PROT_IB_IPV6,
+   .flags = MLX4_INTFF_BONDING
 };
 
 static int __init mlx4_ib_init(void)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6eb743f65f6f..f829fd935b79 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -110,6 +110,9 @@ struct mlx4_ib_cq {
    struct mutex resize_mutex;
    struct ib_umem *umem;
    struct ib_umem *resize_umem;
+   /* List of qps that it serves.*/
+   struct list_head send_qp_list;
+   struct list_head recv_qp_list;
 };
 
 struct mlx4_ib_mr {
@@ -134,10 +137,17 @@ struct mlx4_ib_fmr {
    struct mlx4_fmr mfmr;
 };
 
+#define MAX_REGS_PER_FLOW 2
+
+struct mlx4_flow_reg_id {
+   u64 id;
+   u64 mirror;
+};
+
 struct mlx4_ib_flow {
    struct ib_flow ibflow;
    /* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
-   u64 reg_id[2];
+   struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
 };
 
 struct mlx4_ib_wq {
@@ -293,6 +303,9 @@ struct mlx4_ib_qp {
    struct mlx4_roce_smac_vlan_info pri;
    struct mlx4_roce_smac_vlan_info alt;
    u64 reg_id;
+   struct list_head qps_list;
+   struct list_head cq_recv_list;
+   struct list_head cq_send_list;
 };
 
 struct mlx4_ib_srq {
@@ -527,6 +540,10 @@ struct mlx4_ib_dev {
    struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
    /* lock when destroying qp1_proxy and getting netdev events */
    struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
+   u8 bond_next_port;
+   /* protect resources needed as part of reset flow */
+   spinlock_t reset_flow_resource_lock;
+   struct list_head qp_list;
 };
 
 struct ib_event_work {
@@ -622,6 +639,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
    return container_of(ibah, struct mlx4_ib_ah, ibah);
 }
 
+static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
+{
+   dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;
+
+   return dev->bond_next_port + 1;
+}
+
 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
 
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index c36ccbd9a644..e0d271782d0a 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -401,7 +401,8 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
    if (!mfrpl->ibfrpl.page_list)
        goto err_free;
 
-   mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
+   mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist->
+                                                pdev->dev,
                                                 size, &mfrpl->map,
                                                 GFP_KERNEL);
    if (!mfrpl->mapped_page_list)
@@ -423,7 +424,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
    struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
    int size = page_list->max_page_list_len * sizeof (u64);
 
-   dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
+   dma_free_coherent(&dev->dev->persist->pdev->dev, size,
+             mfrpl->mapped_page_list,
              mfrpl->map);
    kfree(mfrpl->ibfrpl.page_list);
    kfree(mfrpl);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cf000b7ad64f..dfc6ca128a7e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -40,11 +40,17 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_mad.h>
 
+#include <linux/mlx4/driver.h>
 #include <linux/mlx4/qp.h>
 
 #include "mlx4_ib.h"
 #include "user.h"
 
+static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
+                            struct mlx4_ib_cq *recv_cq);
+static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
+                              struct mlx4_ib_cq *recv_cq);
+
 enum {
    MLX4_IB_ACK_REQ_FREQ = 8,
 };
@@ -93,17 +99,6 @@ enum {
 #ifndef ETH_ALEN
 #define ETH_ALEN 6
 #endif
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
-   u64 mac = 0;
-   int i;
-
-   for (i = 0; i < ETH_ALEN; i++) {
-       mac <<= 8;
-       mac |= addr[i];
-   }
-   return mac;
-}
 
 static const __be32 mlx4_ib_opcode[] = {
    [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
@@ -628,6 +623,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
    struct mlx4_ib_sqp *sqp;
    struct mlx4_ib_qp *qp;
    enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
+   struct mlx4_ib_cq *mcq;
+   unsigned long flags;
 
    /* When tunneling special qps, we use a plain UD qp */
    if (sqpn) {
@@ -838,6 +835,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
    qp->mqp.event = mlx4_ib_qp_event;
    if (!*caller_qp)
        *caller_qp = qp;
+
+   spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+   mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
+            to_mcq(init_attr->recv_cq));
+   /* Maintain device to QPs access, needed for further handling
+    * via reset flow
+    */
+   list_add_tail(&qp->qps_list, &dev->qp_list);
+   /* Maintain CQ to QPs access, needed for further handling
+    * via reset flow
+    */
+   mcq = to_mcq(init_attr->send_cq);
+   list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
+   mcq = to_mcq(init_attr->recv_cq);
+   list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
+   mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
+              to_mcq(init_attr->recv_cq));
+   spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
    return 0;
 
 err_qpn:
@@ -896,13 +911,13 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
    __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
    if (send_cq == recv_cq) {
-       spin_lock_irq(&send_cq->lock);
+       spin_lock(&send_cq->lock);
        __acquire(&recv_cq->lock);
    } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
-       spin_lock_irq(&send_cq->lock);
+       spin_lock(&send_cq->lock);
        spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
    } else {
-       spin_lock_irq(&recv_cq->lock);
+       spin_lock(&recv_cq->lock);
        spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
    }
 }
@@ -912,13 +927,13 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *re
 {
    if (send_cq == recv_cq) {
        __release(&recv_cq->lock);
-       spin_unlock_irq(&send_cq->lock);
+       spin_unlock(&send_cq->lock);
    } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
        spin_unlock(&recv_cq->lock);
-       spin_unlock_irq(&send_cq->lock);
+       spin_unlock(&send_cq->lock);
    } else {
        spin_unlock(&send_cq->lock);
-       spin_unlock_irq(&recv_cq->lock);
+       spin_unlock(&recv_cq->lock);
    }
 }
 
@@ -963,6 +978,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
                  int is_user)
 {
    struct mlx4_ib_cq *send_cq, *recv_cq;
+   unsigned long flags;
 
    if (qp->state != IB_QPS_RESET) {
        if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
@@ -994,8 +1010,13 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 
    get_cqs(qp, &send_cq, &recv_cq);
 
+   spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
    mlx4_ib_lock_cqs(send_cq, recv_cq);
 
+   /* del from lists under both locks above to protect reset flow paths */
+   list_del(&qp->qps_list);
+   list_del(&qp->cq_send_list);
+   list_del(&qp->cq_recv_list);
    if (!is_user) {
        __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
                   qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
@@ -1006,6 +1027,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
    mlx4_qp_remove(dev->dev, &qp->mqp);
 
    mlx4_ib_unlock_cqs(send_cq, recv_cq);
+   spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
 
    mlx4_qp_free(dev->dev, &qp->mqp);
 
@@ -1915,6 +1937,22 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        goto out;
    }
 
+   if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
+       if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
+           if ((ibqp->qp_type == IB_QPT_RC) ||
+               (ibqp->qp_type == IB_QPT_UD) ||
+               (ibqp->qp_type == IB_QPT_UC) ||
+               (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
+               (ibqp->qp_type == IB_QPT_XRC_INI)) {
+               attr->port_num = mlx4_ib_bond_next_port(dev);
+           }
+       } else {
+           /* no sense in changing port_num
+            * when ports are bonded */
+           attr_mask &= ~IB_QP_PORT;
+       }
+   }
+
    if ((attr_mask & IB_QP_PORT) &&
        (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
        pr_debug("qpn 0x%x: invalid port number (%d) specified "
@@ -1965,6 +2003,9 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
    err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
 
+   if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
+       attr->port_num = 1;
+
 out:
    mutex_unlock(&qp->mutex);
    return err;
@@ -2609,8 +2650,15 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
    __be32 uninitialized_var(lso_hdr_sz);
    __be32 blh;
    int i;
+   struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 
    spin_lock_irqsave(&qp->sq.lock, flags);
+   if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+       err = -EIO;
+       *bad_wr = wr;
+       nreq = 0;
+       goto out;
+   }
 
    ind = qp->sq_next_wqe;
 
@@ -2908,10 +2956,18 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
    int ind;
    int max_gs;
    int i;
+   struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 
    max_gs = qp->rq.max_gs;
    spin_lock_irqsave(&qp->rq.lock, flags);
 
+   if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+       err = -EIO;
+       *bad_wr = wr;
+       nreq = 0;
+       goto out;
+   }
+
    ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
 
    for (nreq = 0; wr; ++nreq, wr = wr->next) {
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 62d9285300af..dce5dfe3a70e 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -316,8 +316,15 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
    int err = 0;
    int nreq;
    int i;
+   struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);
 
    spin_lock_irqsave(&srq->lock, flags);
+   if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+       err = -EIO;
+       *bad_wr = wr;
+       nreq = 0;
+       goto out;
+   }
 
    for (nreq = 0; wr; ++nreq, wr = wr->next) {
        if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
@@ -362,6 +369,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 
        *srq->db.db = cpu_to_be32(srq->wqe_ctr);
    }
+out:
 
    spin_unlock_irqrestore(&srq->lock, flags);
 
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index cb4c66e723b5..d10c2b8a5dad 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -375,7 +375,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
    char base_name[9];
 
    /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
-   strlcpy(name, pci_name(dev->dev->pdev), max);
+   strlcpy(name, pci_name(dev->dev->persist->pdev), max);
    strncpy(base_name, name, 8); /*till xxxx:yy:*/
    base_name[8] = '\0';
    /* with no ARI only 3 last bits are used so when the fn is higher than 8
@@ -792,7 +792,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device)
    if (!mlx4_is_master(device->dev))
        return 0;
 
-   for (i = 0; i <= device->dev->num_vfs; ++i)
+   for (i = 0; i <= device->dev->persist->num_vfs; ++i)
        register_one_pkey_tree(device, i);
 
    return 0;
@@ -807,7 +807,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
    if (!mlx4_is_master(device->dev))
        return;
 
-   for (slave = device->dev->num_vfs; slave >= 0; --slave) {
+   for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) {
        list_for_each_entry_safe(p, t,
                     &device->pkeys.pkey_port_list[slave],
                     entry) {