Diffstat (limited to 'drivers/infiniband/hw/mlx4/qp.c')
 drivers/infiniband/hw/mlx4/qp.c | 79 ++++++++++++++++++++++++++++++-------
 1 file changed, 64 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 269648445113..9a7794ac34c1 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -37,6 +37,7 @@
 
 #include <rdma/ib_cache.h>
 #include <rdma/ib_pack.h>
+#include <rdma/ib_addr.h>
 
 #include <linux/mlx4/qp.h>
 
@@ -57,10 +58,11 @@ enum {
 enum {
 	/*
 	 * Largest possible UD header: send with GRH and immediate
-	 * data plus 14 bytes for an Ethernet header. (LRH would only
-	 * use 8 bytes, so Ethernet is the biggest case)
+	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
+	 * tag. (LRH would only use 8 bytes, so Ethernet is the
+	 * biggest case)
 	 */
-	MLX4_IB_UD_HEADER_SIZE		= 78,
+	MLX4_IB_UD_HEADER_SIZE		= 82,
 	MLX4_IB_LSO_HEADER_SPARE	= 128,
 };
 
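Note: the new constant follows directly from summing the worst-case IBoE UD header chain (standard header sizes; the GRH is carried in place of an LRH):

    Ethernet header    14 bytes
    802.1Q tag          4 bytes
    GRH                40 bytes
    BTH                12 bytes
    DETH                8 bytes
    Immediate data      4 bytes
    -----------------------------
    total              82 bytes   (78 before the VLAN tag was counted)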
@@ -879,6 +881,8 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 		IB_LINK_LAYER_ETHERNET;
 	u8 mac[6];
 	int is_mcast;
+	u16 vlan_tag;
+	int vidx;
 
 	path->grh_mylmc	= ah->src_path_bits & 0x7f;
 	path->rlid	= cpu_to_be16(ah->dlid);
@@ -907,10 +911,10 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 		memcpy(path->rgid, ah->grh.dgid.raw, 16);
 	}
 
-	path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
-		((port - 1) << 6) | ((ah->sl & 0xf) << 2);
-
 	if (is_eth) {
+		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+			((port - 1) << 6) | ((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1);
+
 		if (!(ah->ah_flags & IB_AH_GRH))
 			return -1;
 
@@ -922,7 +926,18 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 		path->ackto = MLX4_IB_LINK_TYPE_ETH;
 		/* use index 0 into MAC table for IBoE */
 		path->grh_mylmc &= 0x80;
-	}
+
+		vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
+		if (vlan_tag < 0x1000) {
+			if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
+				return -ENOENT;
+
+			path->vlan_index = vidx;
+			path->fl = 1 << 6;
+		}
+	} else
+		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+			((port - 1) << 6) | ((ah->sl & 0xf) << 2);
 
 	return 0;
 }
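Note: the VID comes out of the IBoE GID itself; rdma_get_vlan_id() is supplied by the companion <rdma/ib_addr.h> change. A minimal sketch of that helper, assuming the IBoE GID layout where raw bytes 11-12 carry the VLAN ID:

	/* Sketch of the assumed helper: a value >= 0x1000 cannot be a
	 * valid 12-bit 802.1Q VID, so 0xffff is returned for "no VLAN". */
	static inline u16 rdma_get_vlan_id(union ib_gid *dgid)
	{
		u16 vid = dgid->raw[11] << 8 | dgid->raw[12];

		return vid < 0x1000 ? vid : 0xffff;
	}

This is why the vlan_tag < 0x1000 test above doubles as a "does this GID carry a VLAN at all" check.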
@@ -1277,13 +1292,16 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
 	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+	union ib_gid sgid;
 	u16 pkey;
 	int send_size;
 	int header_size;
 	int spc;
 	int i;
 	int is_eth;
+	int is_vlan = 0;
 	int is_grh;
+	u16 vlan;
 
 	send_size = 0;
 	for (i = 0; i < wr->num_sge; ++i)
@@ -1291,7 +1309,13 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 
 	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
 	is_grh = mlx4_ib_ah_grh_present(ah);
-	ib_ud_header_init(send_size, !is_eth, is_eth, 0, is_grh, 0, &sqp->ud_header);
+	if (is_eth) {
+		ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
+				  ah->av.ib.gid_index, &sgid);
+		vlan = rdma_get_vlan_id(&sgid);
+		is_vlan = vlan < 0x1000;
+	}
+	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
 
 	if (!is_eth) {
 		sqp->ud_header.lrh.service_level =
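Note: the fourth argument (previously hard-coded to 0) matches a vlan_present parameter added to ib_ud_header_init() elsewhere in this series, which makes the UD header packer account for the 802.1Q field; that is also what drives the MLX4_IB_UD_HEADER_SIZE bump above.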
@@ -1345,7 +1369,15 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
-		sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+		if (!is_vlan) {
+			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+		} else {
+			u16 pcp;
+
+			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+			pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13;
+			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
+		}
 	} else {
 		sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
 		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
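Note: the tag word built above follows the standard 802.1Q TCI layout: PCP in bits 15-13, DEI in bit 12, VID in bits 11-0; the hunk derives its priority bits from the AH's sl_tclass_flowlabel word before OR-ing in the VID. A standalone sketch of the generic packing (helper name is illustrative, not from this patch):

	#include <stdint.h>

	/* Illustrative: compose an 802.1Q TCI from a priority and a VID.
	 * PCP occupies bits 15-13, the 12-bit VID occupies bits 11-0. */
	static uint16_t build_vlan_tci(uint16_t pcp, uint16_t vid)
	{
		return (uint16_t)(((pcp & 0x7) << 13) | (vid & 0xfff));
	}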
@@ -1507,13 +1539,14 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
 }
 
 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
-			     struct ib_send_wr *wr)
+			     struct ib_send_wr *wr, __be16 *vlan)
 {
 	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
 	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
 	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
 	dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
 	memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
+	*vlan = dseg->vlan;
 }
 
 static void set_mlx_icrc_seg(void *dseg)
@@ -1616,6 +1649,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	__be32 uninitialized_var(lso_hdr_sz);
 	__be32 blh;
 	int i;
+	__be16 vlan = cpu_to_be16(0xffff);
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
 
@@ -1719,7 +1753,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 
 		case IB_QPT_UD:
-			set_datagram_seg(wqe, wr);
+			set_datagram_seg(wqe, wr, &vlan);
 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
 
@@ -1797,6 +1831,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
 			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
 
+		if (be16_to_cpu(vlan) < 0x1000) {
+			ctrl->ins_vlan = 1 << 6;
+			ctrl->vlan_tag = vlan;
+		}
+
 		stamp = ind + qp->sq_spare_wqes;
 		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
 
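Note: vlan starts out as the 0xffff sentinel, which can never be a valid 12-bit VID, and is only overwritten by set_datagram_seg() for IB_QPT_UD work requests, so this branch fires only when the UD address handle actually carried a VLAN; the 1 << 6 written to ins_vlan appears to be the HCA's "insert VLAN tag" flag for the tag supplied in ctrl->vlan_tag.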
@@ -1946,17 +1985,27 @@ static int to_ib_qp_access_flags(int mlx4_flags)
 	return ib_flags;
 }
 
-static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
+static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
 			  struct mlx4_qp_path *path)
 {
+	struct mlx4_dev *dev = ibdev->dev;
+	int is_eth;
+
 	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
 	ib_ah_attr->port_num	  = path->sched_queue & 0x40 ? 2 : 1;
 
 	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
 		return;
 
+	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
+		IB_LINK_LAYER_ETHERNET;
+	if (is_eth)
+		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
+			((path->sched_queue & 4) << 1);
+	else
+		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
+
 	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
-	ib_ah_attr->sl		  = (path->sched_queue >> 2) & 0xf;
 	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
 	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
 	ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
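Note: the Ethernet branch above is the exact inverse of the send-side packing introduced in mlx4_set_path() (((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1)): SL bits 2:0 live in sched_queue bits 5:3 and SL bit 3 in sched_queue bit 2. A standalone sketch verifying the round trip (helper names are illustrative):

	#include <assert.h>
	#include <stdint.h>

	/* Pack as mlx4_set_path() does for Ethernet ports. */
	static uint8_t pack_eth_sl(uint8_t sl)
	{
		return (uint8_t)(((sl & 7) << 3) | ((sl & 8) >> 1));
	}

	/* Unpack as to_ib_ah_attr() does. */
	static uint8_t unpack_eth_sl(uint8_t sched_queue)
	{
		return (uint8_t)(((sched_queue >> 3) & 0x7) | ((sched_queue & 4) << 1));
	}

	int main(void)
	{
		/* e.g. sl = 0xa packs to 0x14 and unpacks back to 0xa */
		for (uint8_t sl = 0; sl < 16; ++sl)
			assert(unpack_eth_sl(pack_eth_sl(sl)) == sl);
		return 0;
	}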
@@ -2009,8 +2058,8 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
 			to_ib_qp_access_flags(be32_to_cpu(context.params2));
 
 	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
-		to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
-		to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
+		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
+		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
 		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
 		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
 	}