author     Eli Cohen <eli@dev.mellanox.co.il>   2010-08-26 10:19:22 -0400
committer  Roland Dreier <rolandd@cisco.com>    2010-10-25 13:20:39 -0400
commit     4c3eb3ca13966508bcb64f39dcdef48be22f1731
tree       67fde746d256e38421c682501974868971507680
parent     af7bd463761c6abd8ca8d831f9cc0ac19f3b7d4b
IB/mlx4: Add VLAN support for IBoE
This patch allows IBoE traffic to be encapsulated in 802.1Q tagged VLAN
frames. The VLAN tag is encoded in the GID and derived from it by a
simple computation.

The netdev notifier callback is modified to catch VLAN device
addition/removal, and the port's GID table is updated to reflect the
change, so that for each netdevice there is an entry in the GID table.
When the port's GID table is exhausted, no further GID entries are
added. Only children of the main interface can add entries to the GID
table; if a VLAN interface is stacked on another VLAN interface
(e.g. "vconfig add eth2.6 8"), that interface will not add an entry to
the GID table.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
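As a concrete illustration of the "simple computation" mentioned above (the VLAN ID is carried in bytes 3-4 of the EUI-64 interface ID, i.e. bytes 11-12 of the GID), here is a small userspace-style C sketch. The helper names and the standalone program are invented for this example and are not kernel or verbs APIs; they only mirror what the patch does in mlx4_addrconf_ifid_eui48() and what rdma_get_vlan_id() reads back.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NO_VLAN 0xffff  /* any value >= 0x1000 means "untagged" */

/* Build the low 8 bytes (EUI-64) of an IBoE GID from a MAC and a VLAN ID. */
static void make_iboe_eui64(uint8_t eui[8], const uint8_t mac[6], uint16_t vlan_id)
{
        memcpy(eui, mac, 3);
        memcpy(eui + 5, mac + 3, 3);
        if (vlan_id < 0x1000) {         /* valid 12-bit VLAN ID */
                eui[3] = vlan_id >> 8;
                eui[4] = vlan_id & 0xff;
        } else {                        /* untagged: standard EUI-64 filler */
                eui[3] = 0xff;
                eui[4] = 0xfe;
        }
        eui[0] ^= 2;                    /* flip the universal/local bit */
}

/* Recover the VLAN ID from the interface ID; >= 0x1000 means untagged. */
static uint16_t eui64_to_vlan_id(const uint8_t eui[8])
{
        return (uint16_t)(eui[3] << 8) | eui[4];
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
        uint8_t eui[8];

        make_iboe_eui64(eui, mac, 6);           /* e.g. the GID for eth2.6 */
        printf("vlan from gid: %u\n", (unsigned)eui64_to_vlan_id(eui));

        make_iboe_eui64(eui, mac, NO_VLAN);     /* the main interface's GID */
        printf("vlan from gid: 0x%x\n", (unsigned)eui64_to_vlan_id(eui));
        return 0;
}

The untagged case keeps the standard 0xff/0xfe EUI-64 filler, so any decoded value of 0x1000 or above simply means "no VLAN", which is exactly the vlan_tag < 0x1000 test used throughout the patch.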
-rw-r--r--  drivers/infiniband/hw/mlx4/ah.c    |  10
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c  |  99
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c    |  79
-rw-r--r--  drivers/net/mlx4/en_netdev.c       |  10
-rw-r--r--  drivers/net/mlx4/mlx4_en.h         |   1
-rw-r--r--  drivers/net/mlx4/port.c            |  19
-rw-r--r--  include/linux/mlx4/device.h        |   1
-rw-r--r--  include/linux/mlx4/qp.h            |   2
8 files changed, 193 insertions(+), 28 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 3bf3544c0aa0..4b8f9c49397e 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -31,6 +31,7 @@
  */
 
 #include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
 
 #include <linux/slab.h>
 #include <linux/inet.h>
@@ -91,17 +92,26 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 {
         struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
         struct mlx4_dev *dev = ibdev->dev;
+        union ib_gid sgid;
         u8 mac[6];
         int err;
         int is_mcast;
+        u16 vlan_tag;
 
         err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
         if (err)
                 return ERR_PTR(err);
 
         memcpy(ah->av.eth.mac, mac, 6);
+        err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
+        if (err)
+                return ERR_PTR(err);
+        vlan_tag = rdma_get_vlan_id(&sgid);
+        if (vlan_tag < 0x1000)
+                vlan_tag |= (ah_attr->sl & 7) << 13;
         ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
         ah->av.eth.gid_index = ah_attr->grh.sgid_index;
+        ah->av.eth.vlan = cpu_to_be16(vlan_tag);
         if (ah_attr->static_rate) {
                 ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
                 while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e65db73fc277..8736bd836dc0 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -38,6 +38,7 @@
 #include <linux/netdevice.h>
 #include <linux/inetdevice.h>
 #include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
 
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
@@ -79,6 +80,8 @@ static void init_query_mad(struct ib_smp *mad)
         mad->method = IB_MGMT_METHOD_GET;
 }
 
+static union ib_gid zgid;
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
                                 struct ib_device_attr *props)
 {
@@ -755,12 +758,17 @@ static struct device_attribute *mlx4_class_attributes[] = {
         &dev_attr_board_id
 };
 
-static void mlx4_addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
 {
         memcpy(eui, dev->dev_addr, 3);
         memcpy(eui + 5, dev->dev_addr + 3, 3);
-        eui[3] = 0xFF;
-        eui[4] = 0xFE;
+        if (vlan_id < 0x1000) {
+                eui[3] = vlan_id >> 8;
+                eui[4] = vlan_id & 0xff;
+        } else {
+                eui[3] = 0xff;
+                eui[4] = 0xfe;
+        }
         eui[0] ^= 2;
 }
 
@@ -802,28 +810,93 @@ static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
 {
         struct net_device *ndev = dev->iboe.netdevs[port - 1];
         struct update_gid_work *work;
+        struct net_device *tmp;
+        int i;
+        u8 *hits;
+        int ret;
+        union ib_gid gid;
+        int free;
+        int found;
+        int need_update = 0;
+        u16 vid;
 
         work = kzalloc(sizeof *work, GFP_ATOMIC);
         if (!work)
                 return -ENOMEM;
 
-        if (!clear) {
-                mlx4_addrconf_ifid_eui48(&work->gids[0].raw[8], ndev);
-                work->gids[0].global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+        hits = kzalloc(128, GFP_ATOMIC);
+        if (!hits) {
+                ret = -ENOMEM;
+                goto out;
+        }
+
+        read_lock(&dev_base_lock);
+        for_each_netdev(&init_net, tmp) {
+                if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
+                        gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+                        vid = rdma_vlan_dev_vlan_id(tmp);
+                        mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
+                        found = 0;
+                        free = -1;
+                        for (i = 0; i < 128; ++i) {
+                                if (free < 0 &&
+                                    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
+                                        free = i;
+                                if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
+                                        hits[i] = 1;
+                                        found = 1;
+                                        break;
+                                }
+                        }
+
+                        if (!found) {
+                                if (tmp == ndev &&
+                                    (memcmp(&dev->iboe.gid_table[port - 1][0],
+                                            &gid, sizeof gid) ||
+                                     !memcmp(&dev->iboe.gid_table[port - 1][0],
+                                             &zgid, sizeof gid))) {
+                                        dev->iboe.gid_table[port - 1][0] = gid;
+                                        ++need_update;
+                                        hits[0] = 1;
+                                } else if (free >= 0) {
+                                        dev->iboe.gid_table[port - 1][free] = gid;
+                                        hits[free] = 1;
+                                        ++need_update;
+                                }
+                        }
+                }
         }
+        read_unlock(&dev_base_lock);
+
+        for (i = 0; i < 128; ++i)
+                if (!hits[i]) {
+                        if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
+                                ++need_update;
+                        dev->iboe.gid_table[port - 1][i] = zgid;
+                }
 
-        INIT_WORK(&work->work, update_gids_task);
-        work->port = port;
-        work->dev = dev;
-        queue_work(wq, &work->work);
+        if (need_update) {
+                memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
+                INIT_WORK(&work->work, update_gids_task);
+                work->port = port;
+                work->dev = dev;
+                queue_work(wq, &work->work);
+        } else
+                kfree(work);
 
+        kfree(hits);
         return 0;
+
+out:
+        kfree(work);
+        return ret;
 }
 
 static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
 {
         switch (event) {
         case NETDEV_UP:
+        case NETDEV_CHANGEADDR:
                 update_ipv6_gids(dev, port, 0);
                 break;
 
@@ -871,9 +944,11 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event
                 }
         }
 
-        if (dev == iboe->netdevs[0])
+        if (dev == iboe->netdevs[0] ||
+            (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
                 handle_en_event(ibdev, 1, event);
-        else if (dev == iboe->netdevs[1])
+        else if (dev == iboe->netdevs[1]
+                 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
                 handle_en_event(ibdev, 2, event);
 
         spin_unlock(&iboe->lock);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 269648445113..9a7794ac34c1 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -37,6 +37,7 @@
 
 #include <rdma/ib_cache.h>
 #include <rdma/ib_pack.h>
+#include <rdma/ib_addr.h>
 
 #include <linux/mlx4/qp.h>
 
@@ -57,10 +58,11 @@ enum {
 enum {
         /*
          * Largest possible UD header: send with GRH and immediate
-         * data plus 14 bytes for an Ethernet header.  (LRH would only
-         * use 8 bytes, so Ethernet is the biggest case)
+         * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
+         * tag.  (LRH would only use 8 bytes, so Ethernet is the
+         * biggest case)
          */
-        MLX4_IB_UD_HEADER_SIZE = 78,
+        MLX4_IB_UD_HEADER_SIZE = 82,
         MLX4_IB_LSO_HEADER_SPARE = 128,
 };
 
@@ -879,6 +881,8 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
                 IB_LINK_LAYER_ETHERNET;
         u8 mac[6];
         int is_mcast;
+        u16 vlan_tag;
+        int vidx;
 
         path->grh_mylmc = ah->src_path_bits & 0x7f;
         path->rlid = cpu_to_be16(ah->dlid);
@@ -907,10 +911,10 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
                 memcpy(path->rgid, ah->grh.dgid.raw, 16);
         }
 
-        path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
-                ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
-
         if (is_eth) {
+                path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+                        ((port - 1) << 6) | ((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1);
+
                 if (!(ah->ah_flags & IB_AH_GRH))
                         return -1;
 
@@ -922,7 +926,18 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
                 path->ackto = MLX4_IB_LINK_TYPE_ETH;
                 /* use index 0 into MAC table for IBoE */
                 path->grh_mylmc &= 0x80;
-        }
+
+                vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
+                if (vlan_tag < 0x1000) {
+                        if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
+                                return -ENOENT;
+
+                        path->vlan_index = vidx;
+                        path->fl = 1 << 6;
+                }
+        } else
+                path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+                        ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
 
         return 0;
 }
@@ -1277,13 +1292,16 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
         struct mlx4_wqe_mlx_seg *mlx = wqe;
         struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
         struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+        union ib_gid sgid;
         u16 pkey;
         int send_size;
         int header_size;
         int spc;
         int i;
         int is_eth;
+        int is_vlan = 0;
         int is_grh;
+        u16 vlan;
 
         send_size = 0;
         for (i = 0; i < wr->num_sge; ++i)
@@ -1291,7 +1309,13 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 
         is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
         is_grh = mlx4_ib_ah_grh_present(ah);
-        ib_ud_header_init(send_size, !is_eth, is_eth, 0, is_grh, 0, &sqp->ud_header);
+        if (is_eth) {
+                ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
+                                  ah->av.ib.gid_index, &sgid);
+                vlan = rdma_get_vlan_id(&sgid);
+                is_vlan = vlan < 0x1000;
+        }
+        ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
 
         if (!is_eth) {
                 sqp->ud_header.lrh.service_level =
@@ -1345,7 +1369,15 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                 memcpy(sqp->ud_header.eth.smac_h, smac, 6);
                 if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
                         mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
-                sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+                if (!is_vlan) {
+                        sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+                } else {
+                        u16 pcp;
+
+                        sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+                        pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13;
+                        sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
+                }
         } else {
                 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
                 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
@@ -1507,13 +1539,14 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
 }
 
 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
-                             struct ib_send_wr *wr)
+                             struct ib_send_wr *wr, __be16 *vlan)
 {
         memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
         dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
         dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
         dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
         memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
+        *vlan = dseg->vlan;
 }
 
 static void set_mlx_icrc_seg(void *dseg)
@@ -1616,6 +1649,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         __be32 uninitialized_var(lso_hdr_sz);
         __be32 blh;
         int i;
+        __be16 vlan = cpu_to_be16(0xffff);
 
         spin_lock_irqsave(&qp->sq.lock, flags);
 
@@ -1719,7 +1753,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         break;
 
                 case IB_QPT_UD:
-                        set_datagram_seg(wqe, wr);
+                        set_datagram_seg(wqe, wr, &vlan);
                         wqe += sizeof (struct mlx4_wqe_datagram_seg);
                         size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
 
@@ -1797,6 +1831,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                 ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
                         (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
 
+                if (be16_to_cpu(vlan) < 0x1000) {
+                        ctrl->ins_vlan = 1 << 6;
+                        ctrl->vlan_tag = vlan;
+                }
+
                 stamp = ind + qp->sq_spare_wqes;
                 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
 
@@ -1946,17 +1985,27 @@ static int to_ib_qp_access_flags(int mlx4_flags)
         return ib_flags;
 }
 
-static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
+static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
                           struct mlx4_qp_path *path)
 {
+        struct mlx4_dev *dev = ibdev->dev;
+        int is_eth;
+
         memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
         ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;
 
         if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
                 return;
 
+        is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
+                IB_LINK_LAYER_ETHERNET;
+        if (is_eth)
+                ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
+                        ((path->sched_queue & 4) << 1);
+        else
+                ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
+
         ib_ah_attr->dlid = be16_to_cpu(path->rlid);
-        ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
         ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
         ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
         ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
@@ -2009,8 +2058,8 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
                         to_ib_qp_access_flags(be32_to_cpu(context.params2));
 
         if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
-                to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
-                to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
+                to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
+                to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
                 qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
                 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
         }
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index a0d8a26f5a02..9a87c4f3bbbd 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -69,6 +69,7 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
         struct mlx4_en_priv *priv = netdev_priv(dev);
         struct mlx4_en_dev *mdev = priv->mdev;
         int err;
+        int idx;
 
         if (!priv->vlgrp)
                 return;
@@ -83,7 +84,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                 if (err)
                         en_err(priv, "Failed configuring VLAN filter\n");
         }
+        if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
+                en_err(priv, "failed adding vlan %d\n", vid);
         mutex_unlock(&mdev->state_lock);
+
 }
 
 static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -91,6 +95,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
         struct mlx4_en_priv *priv = netdev_priv(dev);
         struct mlx4_en_dev *mdev = priv->mdev;
         int err;
+        int idx;
 
         if (!priv->vlgrp)
                 return;
@@ -101,6 +106,11 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
         /* Remove VID from port VLAN filter */
         mutex_lock(&mdev->state_lock);
+        if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
+                mlx4_unregister_vlan(mdev->dev, priv->port, idx);
+        else
+                en_err(priv, "could not find vid %d in cache\n", vid);
+
         if (mdev->device_up && priv->port_up) {
                 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
                 if (err)
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 449210994ee9..dab5eafb8946 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -463,6 +463,7 @@ struct mlx4_en_priv {
         char *mc_addrs;
         int mc_addrs_cnt;
         struct mlx4_en_stat_out_mbox hw_stats;
+        int vids[128];
 };
 
 
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 606aa58afdea..56371ef328ef 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -182,6 +182,25 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
         return err;
 }
 
+int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
+{
+        struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+        int i;
+
+        for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
+                if (table->refs[i] &&
+                    (vid == (MLX4_VLAN_MASK &
+                              be32_to_cpu(table->entries[i])))) {
+                        /* VLAN already registered, increase reference count */
+                        *idx = i;
+                        return 0;
+                }
+        }
+
+        return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
+
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
 {
         struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ca5645c43f61..ff9893a33e90 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -496,6 +496,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
 
+int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
 
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 97cfdc8d7e2f..0eeb2a1a867c 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -109,7 +109,7 @@ struct mlx4_qp_path {
         __be32 tclass_flowlabel;
         u8 rgid[16];
         u8 sched_queue;
-        u8 snooper_flags;
+        u8 vlan_index;
         u8 reserved3[2];
         u8 counter_index;
         u8 reserved4;
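One closing note on the send path: when the source GID yields a valid VLAN ID, the patch folds the service level into the top three bits of the tag (vlan_tag |= (ah_attr->sl & 7) << 13 in create_iboe_ah()), so the SL travels in the 802.1p priority field of the TCI. Below is a minimal standalone sketch of that packing; build_vlan_tci() is an invented name for this example, not a kernel function, and build_mlx_header() derives the PCP separately from the AV's sl_tclass_flowlabel field.

#include <stdint.h>
#include <stdio.h>

/* Pack a 12-bit VLAN ID and a 3-bit service level into an 802.1Q TCI. */
static uint16_t build_vlan_tci(uint16_t vlan_id, uint8_t sl)
{
        uint16_t tci = vlan_id & 0x0fff;        /* VID in the low 12 bits */

        tci |= (uint16_t)(sl & 7) << 13;        /* SL reused as the PCP bits */
        return tci;
}

int main(void)
{
        /* VLAN 6 at service level 5 -> PCP 5, VID 6 -> 0xa006 */
        printf("tci = 0x%04x\n", build_vlan_tci(6, 5));
        return 0;
}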