about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h  19
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c  285
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c  75
3 files changed, 329 insertions, 50 deletions
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index a230683af940..febc8f9bc59a 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -241,6 +241,22 @@ struct mlx4_ib_proxy_sqp_hdr {
241 struct mlx4_rcv_tunnel_hdr tun; 241 struct mlx4_rcv_tunnel_hdr tun;
242} __packed; 242} __packed;
243 243
244struct mlx4_roce_smac_vlan_info {
245 u64 smac;
246 int smac_index;
247 int smac_port;
248 u64 candidate_smac;
249 int candidate_smac_index;
250 int candidate_smac_port;
251 u16 vid;
252 int vlan_index;
253 int vlan_port;
254 u16 candidate_vid;
255 int candidate_vlan_index;
256 int candidate_vlan_port;
257 int update_vid;
258};
259
244struct mlx4_ib_qp { 260struct mlx4_ib_qp {
245 struct ib_qp ibqp; 261 struct ib_qp ibqp;
246 struct mlx4_qp mqp; 262 struct mlx4_qp mqp;
@@ -273,8 +289,9 @@ struct mlx4_ib_qp {
273 struct list_head gid_list; 289 struct list_head gid_list;
274 struct list_head steering_rules; 290 struct list_head steering_rules;
275 struct mlx4_ib_buf *sqp_proxy_rcv; 291 struct mlx4_ib_buf *sqp_proxy_rcv;
292 struct mlx4_roce_smac_vlan_info pri;
293 struct mlx4_roce_smac_vlan_info alt;
276 u64 reg_id; 294 u64 reg_id;
277
278}; 295};
279 296
280struct mlx4_ib_srq { 297struct mlx4_ib_srq {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c6ef2e7e3045..11332f074023 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -662,10 +662,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
662 if (!sqp) 662 if (!sqp)
663 return -ENOMEM; 663 return -ENOMEM;
664 qp = &sqp->qp; 664 qp = &sqp->qp;
665 qp->pri.vid = 0xFFFF;
666 qp->alt.vid = 0xFFFF;
665 } else { 667 } else {
666 qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL); 668 qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL);
667 if (!qp) 669 if (!qp)
668 return -ENOMEM; 670 return -ENOMEM;
671 qp->pri.vid = 0xFFFF;
672 qp->alt.vid = 0xFFFF;
669 } 673 }
670 } else 674 } else
671 qp = *caller_qp; 675 qp = *caller_qp;
@@ -940,11 +944,32 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
940{ 944{
941 struct mlx4_ib_cq *send_cq, *recv_cq; 945 struct mlx4_ib_cq *send_cq, *recv_cq;
942 946
943 if (qp->state != IB_QPS_RESET) 947 if (qp->state != IB_QPS_RESET) {
944 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), 948 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
945 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) 949 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
946 pr_warn("modify QP %06x to RESET failed.\n", 950 pr_warn("modify QP %06x to RESET failed.\n",
947 qp->mqp.qpn); 951 qp->mqp.qpn);
952 if (qp->pri.smac) {
953 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
954 qp->pri.smac = 0;
955 }
956 if (qp->alt.smac) {
957 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
958 qp->alt.smac = 0;
959 }
960 if (qp->pri.vid < 0x1000) {
961 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
962 qp->pri.vid = 0xFFFF;
963 qp->pri.candidate_vid = 0xFFFF;
964 qp->pri.update_vid = 0;
965 }
966 if (qp->alt.vid < 0x1000) {
967 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
968 qp->alt.vid = 0xFFFF;
969 qp->alt.candidate_vid = 0xFFFF;
970 qp->alt.update_vid = 0;
971 }
972 }
948 973
949 get_cqs(qp, &send_cq, &recv_cq); 974 get_cqs(qp, &send_cq, &recv_cq);
950 975
@@ -1057,6 +1082,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
1057 qp = kzalloc(sizeof *qp, GFP_KERNEL); 1082 qp = kzalloc(sizeof *qp, GFP_KERNEL);
1058 if (!qp) 1083 if (!qp)
1059 return ERR_PTR(-ENOMEM); 1084 return ERR_PTR(-ENOMEM);
1085 qp->pri.vid = 0xFFFF;
1086 qp->alt.vid = 0xFFFF;
1060 /* fall through */ 1087 /* fall through */
1061 case IB_QPT_UD: 1088 case IB_QPT_UD:
1062 { 1089 {
@@ -1188,12 +1215,13 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
1188 1215
1189static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, 1216static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
1190 u64 smac, u16 vlan_tag, struct mlx4_qp_path *path, 1217 u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
1191 u8 port) 1218 struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
1192{ 1219{
1193 int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) == 1220 int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
1194 IB_LINK_LAYER_ETHERNET; 1221 IB_LINK_LAYER_ETHERNET;
1195 int vidx; 1222 int vidx;
1196 int smac_index; 1223 int smac_index;
1224 int err;
1197 1225
1198 1226
1199 path->grh_mylmc = ah->src_path_bits & 0x7f; 1227 path->grh_mylmc = ah->src_path_bits & 0x7f;
@@ -1223,61 +1251,103 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
1223 } 1251 }
1224 1252
1225 if (is_eth) { 1253 if (is_eth) {
1226 path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
1227 ((port - 1) << 6) | ((ah->sl & 7) << 3);
1228
1229 if (!(ah->ah_flags & IB_AH_GRH)) 1254 if (!(ah->ah_flags & IB_AH_GRH))
1230 return -1; 1255 return -1;
1231 1256
1232 memcpy(path->dmac, ah->dmac, ETH_ALEN); 1257 path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
1233 path->ackto = MLX4_IB_LINK_TYPE_ETH; 1258 ((port - 1) << 6) | ((ah->sl & 7) << 3);
1234 /* find the index into MAC table for IBoE */
1235 if (!is_zero_ether_addr((const u8 *)&smac)) {
1236 if (mlx4_find_cached_mac(dev->dev, port, smac,
1237 &smac_index))
1238 return -ENOENT;
1239 } else {
1240 smac_index = 0;
1241 }
1242
1243 path->grh_mylmc &= 0x80 | smac_index;
1244 1259
1245 path->feup |= MLX4_FEUP_FORCE_ETH_UP; 1260 path->feup |= MLX4_FEUP_FORCE_ETH_UP;
1246 if (vlan_tag < 0x1000) { 1261 if (vlan_tag < 0x1000) {
1247 if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx)) 1262 if (smac_info->vid < 0x1000) {
1248 return -ENOENT; 1263 /* both valid vlan ids */
1249 1264 if (smac_info->vid != vlan_tag) {
1250 path->vlan_index = vidx; 1265 /* different VIDs. unreg old and reg new */
1251 path->fl = 1 << 6; 1266 err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
1267 if (err)
1268 return err;
1269 smac_info->candidate_vid = vlan_tag;
1270 smac_info->candidate_vlan_index = vidx;
1271 smac_info->candidate_vlan_port = port;
1272 smac_info->update_vid = 1;
1273 path->vlan_index = vidx;
1274 } else {
1275 path->vlan_index = smac_info->vlan_index;
1276 }
1277 } else {
1278 /* no current vlan tag in qp */
1279 err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
1280 if (err)
1281 return err;
1282 smac_info->candidate_vid = vlan_tag;
1283 smac_info->candidate_vlan_index = vidx;
1284 smac_info->candidate_vlan_port = port;
1285 smac_info->update_vid = 1;
1286 path->vlan_index = vidx;
1287 }
1252 path->feup |= MLX4_FVL_FORCE_ETH_VLAN; 1288 path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
1289 path->fl = 1 << 6;
1290 } else {
1291 /* have current vlan tag. unregister it at modify-qp success */
1292 if (smac_info->vid < 0x1000) {
1293 smac_info->candidate_vid = 0xFFFF;
1294 smac_info->update_vid = 1;
1295 }
1253 } 1296 }
1254 } else 1297
1298 /* get smac_index for RoCE use.
1299 * If no smac was yet assigned, register one.
1300 * If one was already assigned, but the new mac differs,
1301 * unregister the old one and register the new one.
1302 */
1303 if (!smac_info->smac || smac_info->smac != smac) {
1304 /* register candidate now, unreg if needed, after success */
1305 smac_index = mlx4_register_mac(dev->dev, port, smac);
1306 if (smac_index >= 0) {
1307 smac_info->candidate_smac_index = smac_index;
1308 smac_info->candidate_smac = smac;
1309 smac_info->candidate_smac_port = port;
1310 } else {
1311 return -EINVAL;
1312 }
1313 } else {
1314 smac_index = smac_info->smac_index;
1315 }
1316
1317 memcpy(path->dmac, ah->dmac, 6);
1318 path->ackto = MLX4_IB_LINK_TYPE_ETH;
1319 /* put MAC table smac index for IBoE */
1320 path->grh_mylmc = (u8) (smac_index) | 0x80;
1321 } else {
1255 path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | 1322 path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
1256 ((port - 1) << 6) | ((ah->sl & 0xf) << 2); 1323 ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
1324 }
1257 1325
1258 return 0; 1326 return 0;
1259} 1327}
1260 1328
1261static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, 1329static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
1262 enum ib_qp_attr_mask qp_attr_mask, 1330 enum ib_qp_attr_mask qp_attr_mask,
1331 struct mlx4_ib_qp *mqp,
1263 struct mlx4_qp_path *path, u8 port) 1332 struct mlx4_qp_path *path, u8 port)
1264{ 1333{
1265 return _mlx4_set_path(dev, &qp->ah_attr, 1334 return _mlx4_set_path(dev, &qp->ah_attr,
1266 mlx4_mac_to_u64((u8 *)qp->smac), 1335 mlx4_mac_to_u64((u8 *)qp->smac),
1267 (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff, 1336 (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
1268 path, port); 1337 path, &mqp->pri, port);
1269} 1338}
1270 1339
1271static int mlx4_set_alt_path(struct mlx4_ib_dev *dev, 1340static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
1272 const struct ib_qp_attr *qp, 1341 const struct ib_qp_attr *qp,
1273 enum ib_qp_attr_mask qp_attr_mask, 1342 enum ib_qp_attr_mask qp_attr_mask,
1343 struct mlx4_ib_qp *mqp,
1274 struct mlx4_qp_path *path, u8 port) 1344 struct mlx4_qp_path *path, u8 port)
1275{ 1345{
1276 return _mlx4_set_path(dev, &qp->alt_ah_attr, 1346 return _mlx4_set_path(dev, &qp->alt_ah_attr,
1277 mlx4_mac_to_u64((u8 *)qp->alt_smac), 1347 mlx4_mac_to_u64((u8 *)qp->alt_smac),
1278 (qp_attr_mask & IB_QP_ALT_VID) ? 1348 (qp_attr_mask & IB_QP_ALT_VID) ?
1279 qp->alt_vlan_id : 0xffff, 1349 qp->alt_vlan_id : 0xffff,
1280 path, port); 1350 path, &mqp->alt, port);
1281} 1351}
1282 1352
1283static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) 1353static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
@@ -1292,6 +1362,37 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1292 } 1362 }
1293} 1363}
1294 1364
1365static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
1366 struct mlx4_qp_context *context)
1367{
1368 struct net_device *ndev;
1369 u64 u64_mac;
1370 int smac_index;
1371
1372
1373 ndev = dev->iboe.netdevs[qp->port - 1];
1374 if (ndev) {
1375 smac = ndev->dev_addr;
1376 u64_mac = mlx4_mac_to_u64(smac);
1377 } else {
1378 u64_mac = dev->dev->caps.def_mac[qp->port];
1379 }
1380
1381 context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
1382 if (!qp->pri.smac) {
1383 smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
1384 if (smac_index >= 0) {
1385 qp->pri.candidate_smac_index = smac_index;
1386 qp->pri.candidate_smac = u64_mac;
1387 qp->pri.candidate_smac_port = qp->port;
1388 context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
1389 } else {
1390 return -ENOENT;
1391 }
1392 }
1393 return 0;
1394}
1395
1295static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, 1396static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1296 const struct ib_qp_attr *attr, int attr_mask, 1397 const struct ib_qp_attr *attr, int attr_mask,
1297 enum ib_qp_state cur_state, enum ib_qp_state new_state) 1398 enum ib_qp_state cur_state, enum ib_qp_state new_state)
@@ -1403,7 +1504,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1403 } 1504 }
1404 1505
1405 if (attr_mask & IB_QP_AV) { 1506 if (attr_mask & IB_QP_AV) {
1406 if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path, 1507 if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
1407 attr_mask & IB_QP_PORT ? 1508 attr_mask & IB_QP_PORT ?
1408 attr->port_num : qp->port)) 1509 attr->port_num : qp->port))
1409 goto out; 1510 goto out;
@@ -1426,7 +1527,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1426 dev->dev->caps.pkey_table_len[attr->alt_port_num]) 1527 dev->dev->caps.pkey_table_len[attr->alt_port_num])
1427 goto out; 1528 goto out;
1428 1529
1429 if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path, 1530 if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
1531 &context->alt_path,
1430 attr->alt_port_num)) 1532 attr->alt_port_num))
1431 goto out; 1533 goto out;
1432 1534
@@ -1532,6 +1634,20 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1532 context->pri_path.fl = 0x80; 1634 context->pri_path.fl = 0x80;
1533 context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE; 1635 context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
1534 } 1636 }
1637 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
1638 IB_LINK_LAYER_ETHERNET) {
1639 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
1640 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
1641 context->pri_path.feup = 1 << 7; /* don't fsm */
1642 /* handle smac_index */
1643 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
1644 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
1645 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
1646 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
1647 if (err)
1648 return -EINVAL;
1649 }
1650 }
1535 } 1651 }
1536 1652
1537 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) 1653 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
@@ -1619,28 +1735,113 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1619 * If we moved a kernel QP to RESET, clean up all old CQ 1735 * If we moved a kernel QP to RESET, clean up all old CQ
1620 * entries and reinitialize the QP. 1736 * entries and reinitialize the QP.
1621 */ 1737 */
1622 if (new_state == IB_QPS_RESET && !ibqp->uobject) { 1738 if (new_state == IB_QPS_RESET) {
1623 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, 1739 if (!ibqp->uobject) {
1624 ibqp->srq ? to_msrq(ibqp->srq): NULL); 1740 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
1625 if (send_cq != recv_cq) 1741 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1626 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); 1742 if (send_cq != recv_cq)
1743 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1744
1745 qp->rq.head = 0;
1746 qp->rq.tail = 0;
1747 qp->sq.head = 0;
1748 qp->sq.tail = 0;
1749 qp->sq_next_wqe = 0;
1750 if (qp->rq.wqe_cnt)
1751 *qp->db.db = 0;
1627 1752
1628 qp->rq.head = 0; 1753 if (qp->flags & MLX4_IB_QP_NETIF)
1629 qp->rq.tail = 0; 1754 mlx4_ib_steer_qp_reg(dev, qp, 0);
1630 qp->sq.head = 0; 1755 }
1631 qp->sq.tail = 0; 1756 if (qp->pri.smac) {
1632 qp->sq_next_wqe = 0; 1757 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
1633 if (qp->rq.wqe_cnt) 1758 qp->pri.smac = 0;
1634 *qp->db.db = 0; 1759 }
1760 if (qp->alt.smac) {
1761 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
1762 qp->alt.smac = 0;
1763 }
1764 if (qp->pri.vid < 0x1000) {
1765 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
1766 qp->pri.vid = 0xFFFF;
1767 qp->pri.candidate_vid = 0xFFFF;
1768 qp->pri.update_vid = 0;
1769 }
1635 1770
1636 if (qp->flags & MLX4_IB_QP_NETIF) 1771 if (qp->alt.vid < 0x1000) {
1637 mlx4_ib_steer_qp_reg(dev, qp, 0); 1772 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
1773 qp->alt.vid = 0xFFFF;
1774 qp->alt.candidate_vid = 0xFFFF;
1775 qp->alt.update_vid = 0;
1776 }
1638 } 1777 }
1639
1640out: 1778out:
1641 if (err && steer_qp) 1779 if (err && steer_qp)
1642 mlx4_ib_steer_qp_reg(dev, qp, 0); 1780 mlx4_ib_steer_qp_reg(dev, qp, 0);
1643 kfree(context); 1781 kfree(context);
1782 if (qp->pri.candidate_smac) {
1783 if (err) {
1784 mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
1785 } else {
1786 if (qp->pri.smac)
1787 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
1788 qp->pri.smac = qp->pri.candidate_smac;
1789 qp->pri.smac_index = qp->pri.candidate_smac_index;
1790 qp->pri.smac_port = qp->pri.candidate_smac_port;
1791 }
1792 qp->pri.candidate_smac = 0;
1793 qp->pri.candidate_smac_index = 0;
1794 qp->pri.candidate_smac_port = 0;
1795 }
1796 if (qp->alt.candidate_smac) {
1797 if (err) {
1798 mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
1799 } else {
1800 if (qp->alt.smac)
1801 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
1802 qp->alt.smac = qp->alt.candidate_smac;
1803 qp->alt.smac_index = qp->alt.candidate_smac_index;
1804 qp->alt.smac_port = qp->alt.candidate_smac_port;
1805 }
1806 qp->alt.candidate_smac = 0;
1807 qp->alt.candidate_smac_index = 0;
1808 qp->alt.candidate_smac_port = 0;
1809 }
1810
1811 if (qp->pri.update_vid) {
1812 if (err) {
1813 if (qp->pri.candidate_vid < 0x1000)
1814 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
1815 qp->pri.candidate_vid);
1816 } else {
1817 if (qp->pri.vid < 0x1000)
1818 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
1819 qp->pri.vid);
1820 qp->pri.vid = qp->pri.candidate_vid;
1821 qp->pri.vlan_port = qp->pri.candidate_vlan_port;
1822 qp->pri.vlan_index = qp->pri.candidate_vlan_index;
1823 }
1824 qp->pri.candidate_vid = 0xFFFF;
1825 qp->pri.update_vid = 0;
1826 }
1827
1828 if (qp->alt.update_vid) {
1829 if (err) {
1830 if (qp->alt.candidate_vid < 0x1000)
1831 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
1832 qp->alt.candidate_vid);
1833 } else {
1834 if (qp->alt.vid < 0x1000)
1835 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
1836 qp->alt.vid);
1837 qp->alt.vid = qp->alt.candidate_vid;
1838 qp->alt.vlan_port = qp->alt.candidate_vlan_port;
1839 qp->alt.vlan_index = qp->alt.candidate_vlan_index;
1840 }
1841 qp->alt.candidate_vid = 0xFFFF;
1842 qp->alt.update_vid = 0;
1843 }
1844
1644 return err; 1845 return err;
1645} 1846}
1646 1847
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1c3634eab5e1..706a6d2b538c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -52,6 +52,8 @@
52struct mac_res { 52struct mac_res {
53 struct list_head list; 53 struct list_head list;
54 u64 mac; 54 u64 mac;
55 int ref_count;
56 u8 smac_index;
55 u8 port; 57 u8 port;
56}; 58};
57 59
@@ -1683,11 +1685,39 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1683 return err; 1685 return err;
1684} 1686}
1685 1687
1686static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) 1688static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1689 u8 smac_index, u64 *mac)
1690{
1691 struct mlx4_priv *priv = mlx4_priv(dev);
1692 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1693 struct list_head *mac_list =
1694 &tracker->slave_list[slave].res_list[RES_MAC];
1695 struct mac_res *res, *tmp;
1696
1697 list_for_each_entry_safe(res, tmp, mac_list, list) {
1698 if (res->smac_index == smac_index && res->port == (u8) port) {
1699 *mac = res->mac;
1700 return 0;
1701 }
1702 }
1703 return -ENOENT;
1704}
1705
1706static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1687{ 1707{
1688 struct mlx4_priv *priv = mlx4_priv(dev); 1708 struct mlx4_priv *priv = mlx4_priv(dev);
1689 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1709 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1690 struct mac_res *res; 1710 struct list_head *mac_list =
1711 &tracker->slave_list[slave].res_list[RES_MAC];
1712 struct mac_res *res, *tmp;
1713
1714 list_for_each_entry_safe(res, tmp, mac_list, list) {
1715 if (res->mac == mac && res->port == (u8) port) {
1716 /* mac found. update ref count */
1717 ++res->ref_count;
1718 return 0;
1719 }
1720 }
1691 1721
1692 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) 1722 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1693 return -EINVAL; 1723 return -EINVAL;
@@ -1698,6 +1728,8 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1698 } 1728 }
1699 res->mac = mac; 1729 res->mac = mac;
1700 res->port = (u8) port; 1730 res->port = (u8) port;
1731 res->smac_index = smac_index;
1732 res->ref_count = 1;
1701 list_add_tail(&res->list, 1733 list_add_tail(&res->list,
1702 &tracker->slave_list[slave].res_list[RES_MAC]); 1734 &tracker->slave_list[slave].res_list[RES_MAC]);
1703 return 0; 1735 return 0;
@@ -1714,9 +1746,11 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1714 1746
1715 list_for_each_entry_safe(res, tmp, mac_list, list) { 1747 list_for_each_entry_safe(res, tmp, mac_list, list) {
1716 if (res->mac == mac && res->port == (u8) port) { 1748 if (res->mac == mac && res->port == (u8) port) {
1717 list_del(&res->list); 1749 if (!--res->ref_count) {
1718 mlx4_release_resource(dev, slave, RES_MAC, 1, port); 1750 list_del(&res->list);
1719 kfree(res); 1751 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1752 kfree(res);
1753 }
1720 break; 1754 break;
1721 } 1755 }
1722 } 1756 }
@@ -1729,10 +1763,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1729 struct list_head *mac_list = 1763 struct list_head *mac_list =
1730 &tracker->slave_list[slave].res_list[RES_MAC]; 1764 &tracker->slave_list[slave].res_list[RES_MAC];
1731 struct mac_res *res, *tmp; 1765 struct mac_res *res, *tmp;
1766 int i;
1732 1767
1733 list_for_each_entry_safe(res, tmp, mac_list, list) { 1768 list_for_each_entry_safe(res, tmp, mac_list, list) {
1734 list_del(&res->list); 1769 list_del(&res->list);
1735 __mlx4_unregister_mac(dev, res->port, res->mac); 1770 /* dereference the mac the num times the slave referenced it */
1771 for (i = 0; i < res->ref_count; i++)
1772 __mlx4_unregister_mac(dev, res->port, res->mac);
1736 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); 1773 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1737 kfree(res); 1774 kfree(res);
1738 } 1775 }
@@ -1744,6 +1781,7 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1744 int err = -EINVAL; 1781 int err = -EINVAL;
1745 int port; 1782 int port;
1746 u64 mac; 1783 u64 mac;
1784 u8 smac_index;
1747 1785
1748 if (op != RES_OP_RESERVE_AND_MAP) 1786 if (op != RES_OP_RESERVE_AND_MAP)
1749 return err; 1787 return err;
@@ -1753,12 +1791,13 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1753 1791
1754 err = __mlx4_register_mac(dev, port, mac); 1792 err = __mlx4_register_mac(dev, port, mac);
1755 if (err >= 0) { 1793 if (err >= 0) {
1794 smac_index = err;
1756 set_param_l(out_param, err); 1795 set_param_l(out_param, err);
1757 err = 0; 1796 err = 0;
1758 } 1797 }
1759 1798
1760 if (!err) { 1799 if (!err) {
1761 err = mac_add_to_slave(dev, slave, mac, port); 1800 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1762 if (err) 1801 if (err)
1763 __mlx4_unregister_mac(dev, port, mac); 1802 __mlx4_unregister_mac(dev, port, mac);
1764 } 1803 }
@@ -3306,6 +3345,25 @@ int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3306 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3345 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3307} 3346}
3308 3347
3348static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3349 struct mlx4_qp_context *qpc,
3350 struct mlx4_cmd_mailbox *inbox)
3351{
3352 u64 mac;
3353 int port;
3354 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3355 u8 sched = *(u8 *)(inbox->buf + 64);
3356 u8 smac_ix;
3357
3358 port = (sched >> 6 & 1) + 1;
3359 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3360 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3361 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3362 return -ENOENT;
3363 }
3364 return 0;
3365}
3366
3309int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, 3367int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3310 struct mlx4_vhcr *vhcr, 3368 struct mlx4_vhcr *vhcr,
3311 struct mlx4_cmd_mailbox *inbox, 3369 struct mlx4_cmd_mailbox *inbox,
@@ -3328,6 +3386,9 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3328 if (err) 3386 if (err)
3329 return err; 3387 return err;
3330 3388
3389 if (roce_verify_mac(dev, slave, qpc, inbox))
3390 return -EINVAL;
3391
3331 update_pkey_index(dev, slave, inbox); 3392 update_pkey_index(dev, slave, inbox);
3332 update_gid(dev, inbox, (u8)slave); 3393 update_gid(dev, inbox, (u8)slave);
3333 adjust_proxy_tun_qkey(dev, vhcr, qpc); 3394 adjust_proxy_tun_qkey(dev, vhcr, qpc);