author		Moni Shoua <monis@mellanox.com>		2013-12-12 11:03:14 -0500
committer	Roland Dreier <roland@purestorage.com>	2014-01-18 17:12:53 -0500
commit		297e0dad720664dad44baa2cdd13f871979fb58c (patch)
tree		2c14a33b5fcabd51e41335884896b5209db29de2 /drivers/infiniband/hw
parent		d487ee77740ccf79d7dc1935d4daa77887283028 (diff)
IB/mlx4: Handle Ethernet L2 parameters for IP based GID addressing
IP based RoCE gids don't store Ethernet L2 parameters, MAC and VLAN.
Therefore, we need to extract them from the CQE and place them in
struct ib_wc (to be used for cases where they were previously taken
from the gid). Also, when modifying a QP or building an address
handle, instead of parsing the dgid to get the MAC and VLAN, take
them from the address handle attributes.

Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
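For illustration, this is roughly what the change looks like from a consumer's side: a ULP polling a CQ on a RoCE port can now read the L2 parameters straight out of the work completion. A minimal sketch, assuming a CQ created on an Ethernet (RoCE) port; the helper name and logging are illustrative only and not part of this patch:

/* Sketch, not part of the patch: consuming the L2 fields that the
 * mlx4 poll function now fills in.  Error handling is elided.
 */
static void example_consume_l2(struct ib_cq *cq)
{
	struct ib_wc wc;

	if (ib_poll_cq(cq, 1, &wc) == 1 && wc.status == IB_WC_SUCCESS) {
		if (wc.wc_flags & IB_WC_WITH_SMAC)
			pr_info("smac %pM\n", wc.smac);
		if ((wc.wc_flags & IB_WC_WITH_VLAN) && wc.vlan_id != 0xffff)
			pr_info("vlan %u\n", wc.vlan_id);
	}
}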
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/mlx4/ah.c		 40
-rw-r--r--	drivers/infiniband/hw/mlx4/cq.c		  9
-rw-r--r--	drivers/infiniband/hw/mlx4/mlx4_ib.h	  3
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c		105
4 files changed, 99 insertions, 58 deletions
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index a251becdaa98..170dca608042 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -39,25 +39,6 @@
 
 #include "mlx4_ib.h"
 
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
-			u8 *mac, int *is_mcast, u8 port)
-{
-	struct in6_addr in6;
-
-	*is_mcast = 0;
-
-	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
-	if (rdma_link_local_addr(&in6))
-		rdma_get_ll_mac(&in6, mac);
-	else if (rdma_is_multicast_addr(&in6)) {
-		rdma_get_mcast_mac(&in6, mac);
-		*is_mcast = 1;
-	} else
-		return -EINVAL;
-
-	return 0;
-}
-
 static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 				  struct mlx4_ib_ah *ah)
 {
@@ -92,21 +73,18 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 {
 	struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
 	struct mlx4_dev *dev = ibdev->dev;
-	union ib_gid sgid;
-	u8 mac[6];
-	int err;
 	int is_mcast;
+	struct in6_addr in6;
 	u16 vlan_tag;
 
-	err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
-	if (err)
-		return ERR_PTR(err);
-
-	memcpy(ah->av.eth.mac, mac, 6);
-	err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
-	if (err)
-		return ERR_PTR(err);
-	vlan_tag = rdma_get_vlan_id(&sgid);
+	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+	if (rdma_is_multicast_addr(&in6)) {
+		is_mcast = 1;
+		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
+	} else {
+		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
+	}
+	vlan_tag = ah_attr->vlan_id;
 	if (vlan_tag < 0x1000)
 		vlan_tag |= (ah_attr->sl & 7) << 13;
 	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
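The `(ah_attr->sl & 7) << 13` above folds the 3-bit service level into the priority (PCP) bits of an 802.1Q tag: the 12-bit VID sits in bits 0-11 and the PCP in bits 13-15. A standalone sketch of that packing, with an illustrative helper name:

/* Sketch of the TCI packing done in create_iboe_ah() above.  Only a
 * valid VID (< 0x1000) reaches this point.  Illustrative only.
 */
static u16 example_vlan_tci(u16 vid, u8 sl)
{
	u16 tci = vid & 0x0fff;		/* 12-bit VLAN id, bits 0-11 */

	tci |= (sl & 7) << 13;		/* 3-bit priority (PCP), bits 13-15 */
	return tci;
}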
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 66dbf8062374..cc40f08ca8f1 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -798,6 +798,15 @@ repoll:
 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
 		else
 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
+		if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
+			wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
+				MLX4_CQE_VID_MASK;
+		} else {
+			wc->vlan_id = 0xffff;
+		}
+		wc->wc_flags |= IB_WC_WITH_VLAN;
+		memcpy(wc->smac, cqe->smac, ETH_ALEN);
+		wc->wc_flags |= IB_WC_WITH_SMAC;
 	}
 
 	return 0;
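Throughout this series 0xffff serves as the "no VLAN" sentinel, so consumers can test wc->vlan_id directly. The extraction above can be read in isolation as a small helper; a sketch, assuming the mlx4 CQE bit definitions (MLX4_CQE_VLAN_PRESENT_MASK, MLX4_CQE_VID_MASK) and an illustrative function name:

/* Sketch: the VLAN extraction above, in isolation.  Returns the
 * 12-bit VID of the received frame, or 0xffff when the CQE marks the
 * frame as untagged.  Not part of the patch.
 */
static u16 example_cqe_vlan(struct mlx4_cqe *cqe)
{
	if (!(be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK))
		return 0xffff;	/* untagged */

	return be16_to_cpu(cqe->sl_vid) & MLX4_CQE_VID_MASK;
}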
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 133f41f42194..c06f571619df 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -678,9 +678,6 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 			union ib_gid *gid, int netw_view);
 
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
-			u8 *mac, int *is_mcast, u8 port);
-
 static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 {
 	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index da6f5fa0c328..e0c2186529ff 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -90,6 +90,21 @@ enum {
 	MLX4_RAW_QP_MSGMAX	= 31,
 };
 
+#ifndef ETH_ALEN
+#define ETH_ALEN        6
+#endif
+static inline u64 mlx4_mac_to_u64(u8 *addr)
+{
+	u64 mac = 0;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		mac <<= 8;
+		mac |= addr[i];
+	}
+	return mac;
+}
+
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
 	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
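mlx4_mac_to_u64() packs the six address bytes big-endian into the low 48 bits of the u64, so an all-zero MAC maps to 0 (which is what the is_zero_ether_addr() fallback below relies on). A quick illustrative round-trip; the helper name and sample value are for demonstration only:

/* Sketch: what the helper above produces for a sample address.
 * 00:11:22:33:44:55 packs big-endian into the low 48 bits.
 */
static void example_mac_pack(void)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	WARN_ON(mlx4_mac_to_u64(mac) != 0x001122334455ULL);
}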
@@ -1144,16 +1159,15 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
 	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
 }
 
-static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
-			 struct mlx4_qp_path *path, u8 port)
+static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
+			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
+			  u8 port)
 {
-	int err;
 	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
 		IB_LINK_LAYER_ETHERNET;
-	u8 mac[6];
-	int is_mcast;
-	u16 vlan_tag;
 	int vidx;
+	int smac_index;
+
 
 	path->grh_mylmc     = ah->src_path_bits & 0x7f;
 	path->rlid	    = cpu_to_be16(ah->dlid);
@@ -1188,22 +1202,27 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 		if (!(ah->ah_flags & IB_AH_GRH))
 			return -1;
 
-		err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
-		if (err)
-			return err;
-
-		memcpy(path->dmac, mac, 6);
+		memcpy(path->dmac, ah->dmac, ETH_ALEN);
 		path->ackto = MLX4_IB_LINK_TYPE_ETH;
-		/* use index 0 into MAC table for IBoE */
-		path->grh_mylmc &= 0x80;
+		/* find the index into MAC table for IBoE */
+		if (!is_zero_ether_addr((const u8 *)&smac)) {
+			if (mlx4_find_cached_mac(dev->dev, port, smac,
+						 &smac_index))
+				return -ENOENT;
+		} else {
+			smac_index = 0;
+		}
+
+		path->grh_mylmc &= 0x80 | smac_index;
 
-		vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
+		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
 		if (vlan_tag < 0x1000) {
 			if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
 				return -ENOENT;
 
 			path->vlan_index = vidx;
 			path->fl = 1 << 6;
+			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
 		}
 	} else
 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
@@ -1212,6 +1231,28 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 	return 0;
 }
 
+static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
+			 enum ib_qp_attr_mask qp_attr_mask,
+			 struct mlx4_qp_path *path, u8 port)
+{
+	return _mlx4_set_path(dev, &qp->ah_attr,
+			      mlx4_mac_to_u64((u8 *)qp->smac),
+			      (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
+			      path, port);
+}
+
+static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
+			     const struct ib_qp_attr *qp,
+			     enum ib_qp_attr_mask qp_attr_mask,
+			     struct mlx4_qp_path *path, u8 port)
+{
+	return _mlx4_set_path(dev, &qp->alt_ah_attr,
+			      mlx4_mac_to_u64((u8 *)qp->alt_smac),
+			      (qp_attr_mask & IB_QP_ALT_VID) ?
+			      qp->alt_vlan_id : 0xffff,
+			      path, port);
+}
+
 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 {
 	struct mlx4_ib_gid_entry *ge, *tmp;
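Note how the wrappers pass 0xffff when the caller did not set IB_QP_VID/IB_QP_ALT_VID: since 0xffff fails the `vlan_tag < 0x1000` test in _mlx4_set_path(), the path is simply left untagged. The same convention in sketch form, with an illustrative predicate name:

/* Sketch of the sentinel convention used by the wrappers above:
 * only a real VID (0-4095) triggers the cached-VLAN lookup;
 * 0xffff means "leave the path untagged".
 */
static bool example_path_tagged(u16 vlan_tag)
{
	return vlan_tag < 0x1000;
}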
@@ -1329,7 +1370,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	}
 
 	if (attr_mask & IB_QP_AV) {
-		if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
+		if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path,
 				  attr_mask & IB_QP_PORT ?
 				  attr->port_num : qp->port))
 			goto out;
@@ -1352,8 +1393,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
 			goto out;
 
-		if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
-				  attr->alt_port_num))
+		if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path,
+				      attr->alt_port_num))
 			goto out;
 
 		context->alt_path.pkey_index = attr->alt_pkey_index;
@@ -1464,6 +1505,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
 					  MLX4_IB_LINK_TYPE_ETH;
 
+	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
+		int is_eth = rdma_port_get_link_layer(
+				&dev->ib_dev, qp->port) ==
+				IB_LINK_LAYER_ETHERNET;
+		if (is_eth) {
+			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
+			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
+		}
+	}
+
+
 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
 		sqd_event = 1;
@@ -1561,18 +1613,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	int err = -EINVAL;
-	int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+	int ll;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-	if (cur_state == new_state && cur_state == IB_QPS_RESET)
-		p = IB_LINK_LAYER_UNSPECIFIED;
+	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+		ll = IB_LINK_LAYER_UNSPECIFIED;
+	} else {
+		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
+	}
 
 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-				attr_mask,
-				rdma_port_get_link_layer(&dev->ib_dev, p))) {
+				attr_mask, ll)) {
 		pr_debug("qpn 0x%x: invalid attribute mask specified "
 			 "for transition %d to %d. qp_type %d,"
 			 " attr_mask 0x%x\n",
@@ -1789,8 +1844,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			return err;
 		}
 
-		vlan = rdma_get_vlan_id(&sgid);
-		is_vlan = vlan < 0x1000;
+		if (ah->av.eth.vlan != 0xffff) {
+			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
+			is_vlan = 1;
+		}
 	}
 	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
 
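With the AV now carrying the VLAN chosen at address-handle creation time, build_mlx_header() no longer needs to parse the sgid at all. A sketch of the read side, assuming the AV stores the tag as written by create_iboe_ah() (0xffff again meaning "no VLAN"); the helper name is illustrative:

/* Sketch: reading the VLAN back out of the Ethernet AV, as
 * build_mlx_header() now does.  The low 12 bits hold the VID; the
 * top bits carry the priority folded in from the SL (see ah.c).
 */
static int example_av_vlan(struct mlx4_ib_ah *ah, u16 *vid)
{
	if (ah->av.eth.vlan == 0xffff)
		return 0;	/* untagged */

	*vid = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
	return 1;
}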