author	David S. Miller <davem@davemloft.net>	2013-01-31 12:49:10 -0500
committer	David S. Miller <davem@davemloft.net>	2013-01-31 12:49:10 -0500
commit	58d7553d5593292e1bdbfd6423b44caaca0799bd (patch)
tree	ace64d0ccf89db5b59b596892ba5337aebcff3ec
parent	1b13c97fae9c61dc20db8e0d0a72a29df29ac377 (diff)
parent	3484aac16149636f0ba5b5b0789a2918c682db7e (diff)
Merge branch 'mlx4'
Merge mlx4 bug fixes from Amir Vadai.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_ethtool.c	147
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_netdev.c	32
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/fw.c	11
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/fw.h	1
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/main.c	16
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mcg.c	4
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mlx4.h	12
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mlx4_en.h	5
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/resource_tracker.c	2
-rw-r--r--	include/linux/mlx4/device.h	3
10 files changed, 152 insertions, 81 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 03447dad07e9..911d48876b32 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -35,6 +35,8 @@
 #include <linux/ethtool.h>
 #include <linux/netdevice.h>
 #include <linux/mlx4/driver.h>
+#include <linux/in.h>
+#include <net/ip.h>
 
 #include "mlx4_en.h"
 #include "en_port.h"
@@ -494,7 +496,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		port_up = 1;
-		mlx4_en_stop_port(dev);
+		mlx4_en_stop_port(dev, 1);
 	}
 
 	mlx4_en_free_resources(priv);
@@ -589,7 +591,7 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev,
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		port_up = 1;
-		mlx4_en_stop_port(dev);
+		mlx4_en_stop_port(dev, 1);
 	}
 
 	priv->prof->rss_rings = rss_rings;
@@ -664,27 +666,88 @@ static int mlx4_en_validate_flow(struct net_device *dev,
 
 	if ((cmd->fs.flow_type & FLOW_EXT)) {
 		if (cmd->fs.m_ext.vlan_etype ||
-		    !(cmd->fs.m_ext.vlan_tci == 0 ||
-		      cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
+		    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
+		      0 ||
+		      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
+		      cpu_to_be16(VLAN_VID_MASK)))
 			return -EINVAL;
+
+		if (cmd->fs.m_ext.vlan_tci) {
+			if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
+				return -EINVAL;
+
+		}
 	}
 
 	return 0;
 }
 
+static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
+					struct list_head *rule_list_h,
+					struct mlx4_spec_list *spec_l2,
+					unsigned char *mac)
+{
+	int err = 0;
+	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+	spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
+	memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
+	memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
+
+	if ((cmd->fs.flow_type & FLOW_EXT) &&
+	    (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
+		spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
+		spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
+	}
+
+	list_add_tail(&spec_l2->list, rule_list_h);
+
+	return err;
+}
+
+static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
+						struct ethtool_rxnfc *cmd,
+						struct list_head *rule_list_h,
+						struct mlx4_spec_list *spec_l2,
+						__be32 ipv4_dst)
+{
+	__be64 be_mac = 0;
+	unsigned char mac[ETH_ALEN];
+
+	if (!ipv4_is_multicast(ipv4_dst)) {
+		if (cmd->fs.flow_type & FLOW_MAC_EXT) {
+			memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
+		} else {
+			be_mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
+			memcpy(&mac, &be_mac, ETH_ALEN);
+		}
+	} else {
+		ip_eth_mc_map(ipv4_dst, mac);
+	}
+
+	return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
+}
+
 static int add_ip_rule(struct mlx4_en_priv *priv,
 		       struct ethtool_rxnfc *cmd,
 		       struct list_head *list_h)
 {
-	struct mlx4_spec_list *spec_l3;
+	struct mlx4_spec_list *spec_l2 = NULL;
+	struct mlx4_spec_list *spec_l3 = NULL;
 	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
 
-	spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
-	if (!spec_l3) {
+	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
+	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+	if (!spec_l2 || !spec_l3) {
 		en_err(priv, "Fail to alloc ethtool rule.\n");
+		kfree(spec_l2);
+		kfree(spec_l3);
 		return -ENOMEM;
 	}
 
+	mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
+					     cmd->fs.h_u.
+					     usr_ip4_spec.ip4dst);
 	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
 	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
 	if (l3_mask->ip4src)
@@ -701,14 +764,17 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
 			    struct ethtool_rxnfc *cmd,
 			    struct list_head *list_h, int proto)
 {
-	struct mlx4_spec_list *spec_l3;
-	struct mlx4_spec_list *spec_l4;
+	struct mlx4_spec_list *spec_l2 = NULL;
+	struct mlx4_spec_list *spec_l3 = NULL;
+	struct mlx4_spec_list *spec_l4 = NULL;
 	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
 
-	spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
-	spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
-	if (!spec_l4 || !spec_l3) {
+	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
+	spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
+	if (!spec_l2 || !spec_l3 || !spec_l4) {
 		en_err(priv, "Fail to alloc ethtool rule.\n");
+		kfree(spec_l2);
 		kfree(spec_l3);
 		kfree(spec_l4);
 		return -ENOMEM;
@@ -717,12 +783,20 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
 	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
 
 	if (proto == TCP_V4_FLOW) {
+		mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
+						     spec_l2,
+						     cmd->fs.h_u.
+						     tcp_ip4_spec.ip4dst);
 		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
 		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
 		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
 		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
 		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
 	} else {
+		mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
+						     spec_l2,
+						     cmd->fs.h_u.
+						     udp_ip4_spec.ip4dst);
 		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
 		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
 		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
@@ -751,43 +825,23 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
 				    struct list_head *rule_list_h)
 {
 	int err;
-	__be64 be_mac;
 	struct ethhdr *eth_spec;
-	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_spec_list *spec_l2;
-	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+	struct mlx4_en_priv *priv = netdev_priv(dev);
 
 	err = mlx4_en_validate_flow(dev, cmd);
 	if (err)
 		return err;
 
-	spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
-	if (!spec_l2)
-		return -ENOMEM;
-
-	if (cmd->fs.flow_type & FLOW_MAC_EXT) {
-		memcpy(&be_mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
-	} else {
-		u64 mac = priv->mac & MLX4_MAC_MASK;
-		be_mac = cpu_to_be64(mac << 16);
-	}
-
-	spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
-	memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
-	if ((cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) != ETHER_FLOW)
-		memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
-
-	if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
-		spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
-		spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
-	}
-
-	list_add_tail(&spec_l2->list, rule_list_h);
-
 	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
 	case ETHER_FLOW:
+		spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+		if (!spec_l2)
+			return -ENOMEM;
+
 		eth_spec = &cmd->fs.h_u.ether_spec;
-		memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
+		mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
+					     &eth_spec->h_dest[0]);
 		spec_l2->eth.ether_type = eth_spec->h_proto;
 		if (eth_spec->h_proto)
 			spec_l2->eth.ether_type_enable = 1;
@@ -861,6 +915,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
 		loc_rule->id = 0;
 		memset(&loc_rule->flow_spec, 0,
 		       sizeof(struct ethtool_rx_flow_spec));
+		list_del(&loc_rule->list);
 	}
 	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
 	if (err) {
@@ -871,6 +926,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
 	loc_rule->id = reg_id;
 	memcpy(&loc_rule->flow_spec, &cmd->fs,
 	       sizeof(struct ethtool_rx_flow_spec));
+	list_add_tail(&loc_rule->list, &priv->ethtool_list);
 
 out_free_list:
 	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
@@ -904,6 +960,7 @@ static int mlx4_en_flow_detach(struct net_device *dev,
 	}
 	rule->id = 0;
 	memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
+	list_del(&rule->list);
 out:
 	return err;
 
@@ -952,7 +1009,8 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 	if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
 	     cmd->cmd == ETHTOOL_GRXCLSRULE ||
 	     cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
-	     mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+	     (mdev->dev->caps.steering_mode !=
+	      MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
 		return -EINVAL;
 
 	switch (cmd->cmd) {
@@ -988,7 +1046,8 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+	if (mdev->dev->caps.steering_mode !=
+	    MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
 		return -EINVAL;
 
 	switch (cmd->cmd) {
@@ -1037,7 +1096,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		port_up = 1;
-		mlx4_en_stop_port(dev);
+		mlx4_en_stop_port(dev, 1);
 	}
 
 	mlx4_en_free_resources(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 9c42812d2f6b..ac1c14f7424a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1039,6 +1039,9 @@ int mlx4_en_start_port(struct net_device *dev)
 
 	INIT_LIST_HEAD(&priv->mc_list);
 	INIT_LIST_HEAD(&priv->curr_list);
+	INIT_LIST_HEAD(&priv->ethtool_list);
+	memset(&priv->ethtool_rules[0], 0,
+	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
 
 	/* Calculate Rx buf size */
 	dev->mtu = min(dev->mtu, priv->max_mtu);
@@ -1175,6 +1178,8 @@ int mlx4_en_start_port(struct net_device *dev)
 
 	priv->port_up = true;
 	netif_tx_start_all_queues(dev);
+	netif_device_attach(dev);
+
 	return 0;
 
 tx_err:
@@ -1197,11 +1202,12 @@ cq_err:
 }
 
 
-void mlx4_en_stop_port(struct net_device *dev)
+void mlx4_en_stop_port(struct net_device *dev, int detach)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_mc_list *mclist, *tmp;
+	struct ethtool_flow_id *flow, *tmp_flow;
 	int i;
 	u8 mc_list[16] = {0};
 
@@ -1212,9 +1218,13 @@ void mlx4_en_stop_port(struct net_device *dev)
 
 	/* Synchronize with tx routine */
 	netif_tx_lock_bh(dev);
+	if (detach)
+		netif_device_detach(dev);
 	netif_tx_stop_all_queues(dev);
 	netif_tx_unlock_bh(dev);
 
+	netif_tx_disable(dev);
+
 	/* Set port as not active */
 	priv->port_up = false;
 
@@ -1281,7 +1291,19 @@ void mlx4_en_stop_port(struct net_device *dev)
 
 	/* Unregister Mac address for the port */
 	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
-	mdev->mac_removed[priv->port] = 1;
+	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+		mdev->mac_removed[priv->port] = 1;
+
+	/* Remove flow steering rules for the port*/
+	if (mdev->dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
+		ASSERT_RTNL();
+		list_for_each_entry_safe(flow, tmp_flow,
+					 &priv->ethtool_list, list) {
+			mlx4_flow_detach(mdev->dev, flow->id);
+			list_del(&flow->list);
+		}
+	}
 
 	/* Free RX Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
@@ -1307,7 +1329,7 @@ static void mlx4_en_restart(struct work_struct *work)
 
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
-		mlx4_en_stop_port(dev);
+		mlx4_en_stop_port(dev, 1);
 		for (i = 0; i < priv->tx_ring_num; i++)
 			netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
 		if (mlx4_en_start_port(dev))
@@ -1379,7 +1401,7 @@ static int mlx4_en_close(struct net_device *dev)
 
 	mutex_lock(&mdev->state_lock);
 
-	mlx4_en_stop_port(dev);
+	mlx4_en_stop_port(dev, 0);
 	netif_carrier_off(dev);
 
 	mutex_unlock(&mdev->state_lock);
@@ -1517,7 +1539,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 		 * the port */
 		en_dbg(DRV, priv, "Change MTU called with card down!?\n");
 	} else {
-		mlx4_en_stop_port(dev);
+		mlx4_en_stop_port(dev, 1);
 		err = mlx4_en_start_port(dev);
 		if (err) {
 			en_err(priv, "Failed restarting port:%d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 8b3d0512a46b..38b62c78d5da 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -127,7 +127,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[0] = "RSS support",
 		[1] = "RSS Toeplitz Hash Function support",
 		[2] = "RSS XOR Hash Function support",
-		[3] = "Device manage flow steering support"
+		[3] = "Device manage flow steering support",
+		[4] = "Automatic mac reassignment support"
 	};
 	int i;
 
@@ -478,6 +479,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
+#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
 
 	dev_cap->flags2 = 0;
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -637,6 +639,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
 	MLX4_GET(dev_cap->reserved_lkey, outbox,
 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
+	if (field & 1<<6)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
 	MLX4_GET(dev_cap->max_icm_sz, outbox,
 		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
 	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -1287,14 +1292,14 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 		/* Enable Ethernet flow steering
 		 * with udp unicast and tcp unicast
 		 */
-		MLX4_PUT(inbox, param->fs_hash_enable_bits,
+		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
 			 INIT_HCA_FS_ETH_BITS_OFFSET);
 		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
 			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
 		/* Enable IPoIB flow steering
 		 * with udp unicast and tcp unicast
 		 */
-		MLX4_PUT(inbox, param->fs_hash_enable_bits,
+		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
 			 INIT_HCA_FS_IB_BITS_OFFSET);
 		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
 			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index dbf2f69cc59f..3af33ff669cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -171,7 +171,6 @@ struct mlx4_init_hca_param {
 	u8 log_mpt_sz;
 	u8 log_uar_sz;
 	u8 uar_page_sz; /* log pg sz in 4k chunks */
-	u8 fs_hash_enable_bits;
 	u8 steering_mode; /* for QUERY_HCA */
 	u64 dev_cap_enabled;
 };
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f1ee52d10467..e38c6b2e1578 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1415,22 +1415,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 	if (mlx4_is_master(dev))
 		mlx4_parav_master_pf_caps(dev);
 
-	priv->fs_hash_mode = MLX4_FS_L2_HASH;
-
-	switch (priv->fs_hash_mode) {
-	case MLX4_FS_L2_HASH:
-		init_hca.fs_hash_enable_bits = 0;
-		break;
-
-	case MLX4_FS_L2_L3_L4_HASH:
-		/* Enable flow steering with
-		 * udp unicast and tcp unicast
-		 */
-		init_hca.fs_hash_enable_bits =
-			MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
-		break;
-	}
-
 	profile = default_profile;
 	if (dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1ee4db3c6400..52685524708d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -664,7 +664,7 @@ static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
 	dw |= ctrl->priority << 16;
 
 	hw->ctrl = cpu_to_be32(dw);
-	hw->vf_vep_port = cpu_to_be32(ctrl->port);
+	hw->port = ctrl->port;
 	hw->qpn = cpu_to_be32(ctrl->qpn);
 }
 
@@ -1157,7 +1157,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 		.priority = MLX4_DOMAIN_NIC,
 	};
 
-	rule.allow_loopback = ~block_mcast_loopback;
+	rule.allow_loopback = !block_mcast_loopback;
 	rule.port = port;
 	rule.qpn = qp->qpn;
 	INIT_LIST_HEAD(&rule.list);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 116c5c29d2d1..172daaa29a9e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -60,11 +60,6 @@
 #define MLX4_FS_MGM_LOG_ENTRY_SIZE	7
 #define MLX4_FS_NUM_MCG			(1 << 17)
 
-enum {
-	MLX4_FS_L2_HASH = 0,
-	MLX4_FS_L2_L3_L4_HASH,
-};
-
 #define MLX4_NUM_UP		8
 #define MLX4_NUM_TC		8
 #define MLX4_RATELIMIT_UNITS	3 /* 100 Mbps */
@@ -696,9 +691,12 @@ struct mlx4_steer {
 
 struct mlx4_net_trans_rule_hw_ctrl {
 	__be32 ctrl;
-	__be32 vf_vep_port;
+	u8 rsvd1;
+	u8 funcid;
+	u8 vep;
+	u8 port;
 	__be32 qpn;
-	__be32 reserved;
+	__be32 rsvd2;
 };
 
 struct mlx4_net_trans_rule_hw_ib {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8d54412ada63..43f01650e585 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -427,6 +427,7 @@ struct mlx4_en_frag_info {
 #endif
 
 struct ethtool_flow_id {
+	struct list_head list;
 	struct ethtool_rx_flow_spec flow_spec;
 	u64 id;
 };
@@ -441,6 +442,8 @@ struct mlx4_en_priv {
 	struct mlx4_en_port_state port_state;
 	spinlock_t stats_lock;
 	struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
+	/* To allow rules removal while port is going down */
+	struct list_head ethtool_list;
 
 	unsigned long last_moder_packets[MAX_RX_RINGS];
 	unsigned long last_moder_tx_packets;
@@ -536,7 +539,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 			struct mlx4_en_port_profile *prof);
 
 int mlx4_en_start_port(struct net_device *dev);
-void mlx4_en_stop_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev, int detach);
 
 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 561ed2a22a17..5997adc943d0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3018,7 +3018,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
 
 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
-	port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
+	port = ctrl->port;
 	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
 
 	/* Clear a space in the inbox for eth header */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 20ea939c22a6..1883e8e84718 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -150,7 +150,8 @@ enum {
 	MLX4_DEV_CAP_FLAG2_RSS			= 1LL << 0,
 	MLX4_DEV_CAP_FLAG2_RSS_TOP		= 1LL << 1,
 	MLX4_DEV_CAP_FLAG2_RSS_XOR		= 1LL << 2,
-	MLX4_DEV_CAP_FLAG2_FS_EN		= 1LL << 3
+	MLX4_DEV_CAP_FLAG2_FS_EN		= 1LL << 3,
+	MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN	= 1LL << 4
 };
 
 enum {