-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h      |   4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c         |   6
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c                     |   4
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h                  |   3
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c                     |   2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h                   |   3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c              |  13
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h              |  15
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c           | 163
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c              |  28
-rw-r--r--  drivers/net/ethernet/freescale/fec.h                     |  10
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c                | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c                 | 200
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c           |  78
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c                  |  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h                |  28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c                |  19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c    |  44
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c                           | 163
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c                  |  31
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.h                  |   2
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c                   |   6
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c                   |   1
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h                             |  23
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c                        |  35
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c                         |  22
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c                          |   9
-rw-r--r--  include/linux/mlx4/cmd.h                                 |   6
-rw-r--r--  include/linux/mlx4/device.h                              |   4
-rw-r--r--  net/core/rtnetlink.c                                     |   2
-rw-r--r--  net/ipv4/ip_gre.c                                        |   2
-rw-r--r--  net/netlink/genetlink.c                                  |   5
32 files changed, 886 insertions, 161 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 40f22c6794cd..84aecdf06f7a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -386,4 +386,8 @@
 
 #define UNDEF_IRO 0x80000000
 
+/* used for defining the amount of FCoE tasks supported for PF */
+#define MAX_FCOE_FUNCS_PER_ENGINE	2
+#define MAX_NUM_FCOE_TASKS_PER_ENGINE	4096
+
 #endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index fbfff1b3365e..927f83af9d5c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10791,6 +10791,12 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
 		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
 		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
 
+	/* Calculate the number of maximum allowed FCoE tasks */
+	bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
+	if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp))
+		bp->cnic_eth_dev.max_fcoe_exchanges /=
+						MAX_FCOE_FUNCS_PER_ENGINE;
+
 	/* Read the WWN: */
 	if (!IS_MF(bp)) {
 		/* Port info */
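Note on the hunk above: the engine owns a fixed pool of FCoE exchange tasks, and a physical function only keeps the full budget when it is the engine's sole FCoE function. A minimal userspace sketch of that split (hypothetical helper name, not driver code), assuming the same constants:

```c
#include <stdio.h>
#include <stdbool.h>

#define MAX_FCOE_FUNCS_PER_ENGINE     2
#define MAX_NUM_FCOE_TASKS_PER_ENGINE 4096

/* Mirror of the budget calculation: halve the per-engine task pool
 * whenever the engine is shared (multi-function or 4-port mode). */
static unsigned int fcoe_exchanges_for_func(bool multi_function,
					    bool four_port_mode)
{
	unsigned int exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;

	if (multi_function || four_port_mode)
		exchanges /= MAX_FCOE_FUNCS_PER_ENGINE;
	return exchanges;
}

int main(void)
{
	printf("sole function: %u\n", fcoe_exchanges_for_func(false, false));
	printf("shared engine: %u\n", fcoe_exchanges_for_func(true, false));
	return 0;
}
```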
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 149a3a038491..40649a8bf390 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5544,8 +5544,10 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
 
 	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
 		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
-	if (CNIC_SUPPORTS_FCOE(cp))
+	if (CNIC_SUPPORTS_FCOE(cp)) {
 		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
+	}
 
 	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
 		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 0c9367a0f57d..ec9bb9ad4bb3 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -195,6 +195,7 @@ struct cnic_eth_dev {
 	u32	max_fcoe_conn;
 	u32	max_rdma_conn;
 	u32	fcoe_init_cid;
+	u32	max_fcoe_exchanges;
 	u32	fcoe_wwn_port_name_hi;
 	u32	fcoe_wwn_port_name_lo;
 	u32	fcoe_wwn_node_name_hi;
@@ -313,6 +314,8 @@ struct cnic_dev {
 	int		max_fcoe_conn;
 	int		max_rdma_conn;
 
+	int		max_fcoe_exchanges;
+
 	union drv_info_to_mcp	*stats_addr;
 	struct fcoe_capabilities *fcoe_cap;
 
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 791e5ff305d8..4a1f2fa812ab 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1482,7 +1482,7 @@ static int xgmac_set_features(struct net_device *dev, netdev_features_t features
 	u32 ctrl;
 	struct xgmac_priv *priv = netdev_priv(dev);
 	void __iomem *ioaddr = priv->base;
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 
 	if (!(changed & NETIF_F_RXCSUM))
 		return 0;
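The one-line fix above matters because netdev_features_t is a 64-bit type; XOR-ing the feature sets into a u32 silently drops any feature bit above bit 31, so changes to those features would go undetected. A small standalone C demonstration of the truncation (a sketch, assuming a hypothetical feature flag at bit 40):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t netdev_features_t;	/* 64-bit, as in the kernel */

int main(void)
{
	/* Hypothetical feature flag living above bit 31. */
	netdev_features_t high_feature = 1ULL << 40;
	netdev_features_t old_f = 0, new_f = high_feature;

	uint32_t truncated = old_f ^ new_f;		/* buggy: high bits lost */
	netdev_features_t full = old_f ^ new_f;		/* fixed: all 64 bits kept */

	printf("u32 changed = %#x  (the change is invisible)\n", truncated);
	printf("u64 changed = %#llx\n", (unsigned long long)full);
	return 0;
}
```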
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index e2d5ced7e733..9045903dcda3 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -293,7 +293,7 @@ struct be_drv_stats {
 	u32 rx_in_range_errors;
 	u32 rx_out_range_errors;
 	u32 rx_frame_too_long;
-	u32 rx_address_mismatch_drops;
+	u32 rx_address_filtered;
 	u32 rx_dropped_too_small;
 	u32 rx_dropped_too_short;
 	u32 rx_dropped_header_too_small;
@@ -447,6 +447,7 @@ struct be_adapter {
 	u16 max_event_queues;
 	u32 if_cap_flags;
 	u8 pf_number;
+	u64 rss_flags;
 };
 
 #define be_physfn(adapter)		(!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d6291aba2524..d837e4c7ae8b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1898,7 +1898,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
 	return status;
 }
 
-int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+		      u32 rss_hash_opts, u16 table_size)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_rss_config *req;
@@ -1917,16 +1918,12 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
 		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
 
 	req->if_id = cpu_to_le32(adapter->if_handle);
-	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
-				      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
+	req->enable_rss = cpu_to_le16(rss_hash_opts);
+	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
 
-	if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
+	if (lancer_chip(adapter) || skyhawk_chip(adapter))
 		req->hdr.version = 1;
-		req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
-					       RSS_ENABLE_UDP_IPV6);
-	}
 
-	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
 	memcpy(req->cpu_table, rsstable, table_size);
 	memcpy(req->hash, myhash, sizeof(myhash));
 	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 460332021590..0fc9b4775699 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -590,8 +590,8 @@ struct be_port_rxf_stats_v0 {
 	u32 rx_in_range_errors;	/* dword 10*/
 	u32 rx_out_range_errors;	/* dword 11*/
 	u32 rx_frame_too_long;	/* dword 12*/
-	u32 rx_address_mismatch_drops;	/* dword 13*/
-	u32 rx_vlan_mismatch_drops;	/* dword 14*/
+	u32 rx_address_filtered;	/* dword 13*/
+	u32 rx_vlan_filtered;	/* dword 14*/
 	u32 rx_dropped_too_small;	/* dword 15*/
 	u32 rx_dropped_too_short;	/* dword 16*/
 	u32 rx_dropped_header_too_small;	/* dword 17*/
@@ -797,8 +797,8 @@ struct lancer_pport_stats {
 	u32 rx_control_frames_unknown_opcode_hi;
 	u32 rx_in_range_errors;
 	u32 rx_out_of_range_errors;
-	u32 rx_address_mismatch_drops;
-	u32 rx_vlan_mismatch_drops;
+	u32 rx_address_filtered;
+	u32 rx_vlan_filtered;
 	u32 rx_dropped_too_small;
 	u32 rx_dropped_too_short;
 	u32 rx_dropped_header_too_small;
@@ -1090,6 +1090,9 @@ struct be_cmd_resp_query_fw_cfg {
 #define RSS_ENABLE_UDP_IPV4			0x10
 #define RSS_ENABLE_UDP_IPV6			0x20
 
+#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC)
+#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
 struct be_cmd_req_rss_config {
 	struct be_cmd_req_hdr hdr;
 	u32 if_id;
@@ -1573,7 +1576,7 @@ struct be_port_rxf_stats_v1 {
 	u32 rx_in_range_errors;
 	u32 rx_out_range_errors;
 	u32 rx_frame_too_long;
-	u32 rx_address_mismatch_drops;
+	u32 rx_address_filtered;
 	u32 rx_dropped_too_small;
 	u32 rx_dropped_too_short;
 	u32 rx_dropped_header_too_small;
@@ -1860,7 +1863,7 @@ extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
 			u32 *function_mode, u32 *function_caps, u16 *asic_rev);
 extern int be_cmd_reset_function(struct be_adapter *adapter);
 extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
-			u16 table_size);
+			u32 rss_hash_opts, u16 table_size);
 extern int be_process_mcc(struct be_adapter *adapter);
 extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
 			u8 port_num, u8 beacon, u8 status, u8 state);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 07b7f27cb0b9..ec3050b3133e 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -54,7 +54,7 @@ static const struct be_ethtool_stat et_stats[] = {
 	/* Received packets dropped when they don't pass the unicast or
 	 * multicast address filtering.
 	 */
-	{DRVSTAT_INFO(rx_address_mismatch_drops)},
+	{DRVSTAT_INFO(rx_address_filtered)},
 	/* Received packets dropped when IP packet length field is less than
 	 * the IP header length field.
 	 */
@@ -755,6 +755,12 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
 	int status;
 	u8 link_status = 0;
 
+	if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
+		dev_err(&adapter->pdev->dev, "Self test not supported\n");
+		test->flags |= ETH_TEST_FL_FAILED;
+		return;
+	}
+
 	memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
 
 	if (test->flags & ETH_TEST_FL_OFFLINE) {
@@ -934,6 +940,159 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
 	return;
 }
 
+static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
+{
+	u64 data = 0;
+
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+		if (adapter->rss_flags & RSS_ENABLE_IPV4)
+			data |= RXH_IP_DST | RXH_IP_SRC;
+		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
+			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case UDP_V4_FLOW:
+		if (adapter->rss_flags & RSS_ENABLE_IPV4)
+			data |= RXH_IP_DST | RXH_IP_SRC;
+		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
+			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case TCP_V6_FLOW:
+		if (adapter->rss_flags & RSS_ENABLE_IPV6)
+			data |= RXH_IP_DST | RXH_IP_SRC;
+		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
+			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case UDP_V6_FLOW:
+		if (adapter->rss_flags & RSS_ENABLE_IPV6)
+			data |= RXH_IP_DST | RXH_IP_SRC;
+		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
+			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	}
+
+	return data;
+}
+
+static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+			u32 *rule_locs)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+
+	if (!be_multi_rxq(adapter)) {
+		dev_info(&adapter->pdev->dev,
+			 "ethtool::get_rxnfc: RX flow hashing is disabled\n");
+		return -EINVAL;
+	}
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXFH:
+		cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
+		break;
+	case ETHTOOL_GRXRINGS:
+		cmd->data = adapter->num_rx_qs - 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int be_set_rss_hash_opts(struct be_adapter *adapter,
+				struct ethtool_rxnfc *cmd)
+{
+	struct be_rx_obj *rxo;
+	int status = 0, i, j;
+	u8 rsstable[128];
+	u32 rss_flags = adapter->rss_flags;
+
+	if (cmd->data != L3_RSS_FLAGS &&
+	    cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
+		return -EINVAL;
+
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+		if (cmd->data == L3_RSS_FLAGS)
+			rss_flags &= ~RSS_ENABLE_TCP_IPV4;
+		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+			rss_flags |= RSS_ENABLE_IPV4 |
+					RSS_ENABLE_TCP_IPV4;
+		break;
+	case TCP_V6_FLOW:
+		if (cmd->data == L3_RSS_FLAGS)
+			rss_flags &= ~RSS_ENABLE_TCP_IPV6;
+		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+			rss_flags |= RSS_ENABLE_IPV6 |
+					RSS_ENABLE_TCP_IPV6;
+		break;
+	case UDP_V4_FLOW:
+		if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
+		    BEx_chip(adapter))
+			return -EINVAL;
+
+		if (cmd->data == L3_RSS_FLAGS)
+			rss_flags &= ~RSS_ENABLE_UDP_IPV4;
+		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+			rss_flags |= RSS_ENABLE_IPV4 |
+					RSS_ENABLE_UDP_IPV4;
+		break;
+	case UDP_V6_FLOW:
+		if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
+		    BEx_chip(adapter))
+			return -EINVAL;
+
+		if (cmd->data == L3_RSS_FLAGS)
+			rss_flags &= ~RSS_ENABLE_UDP_IPV6;
+		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+			rss_flags |= RSS_ENABLE_IPV6 |
+					RSS_ENABLE_UDP_IPV6;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rss_flags == adapter->rss_flags)
+		return status;
+
+	if (be_multi_rxq(adapter)) {
+		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+			for_all_rss_queues(adapter, rxo, i) {
+				if ((j + i) >= 128)
+					break;
+				rsstable[j + i] = rxo->rss_id;
+			}
+		}
+	}
+	status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
+	if (!status)
+		adapter->rss_flags = rss_flags;
+
+	return status;
+}
+
+static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	int status = 0;
+
+	if (!be_multi_rxq(adapter)) {
+		dev_err(&adapter->pdev->dev,
+			"ethtool::set_rxnfc: RX flow hashing is disabled\n");
+		return -EINVAL;
+	}
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXFH:
+		status = be_set_rss_hash_opts(adapter, cmd);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return status;
+}
+
 const struct ethtool_ops be_ethtool_ops = {
 	.get_settings = be_get_settings,
 	.get_drvinfo = be_get_drvinfo,
@@ -957,4 +1116,6 @@ const struct ethtool_ops be_ethtool_ops = {
 	.get_regs = be_get_regs,
 	.flash_device = be_do_flash,
 	.self_test = be_self_test,
+	.get_rxnfc = be_get_rxnfc,
+	.set_rxnfc = be_set_rxnfc,
 };
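The get path above translates the adapter's device-side RSS enable bits into the field flags ethtool reports, and the set path does the reverse. A standalone C sketch of the TCP/IPv4 arm of that mapping (the RSS_ENABLE_* values are assumed from the driver header and not shown in this diff; the RXH_* values follow the ethtool UAPI convention):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed device-side RSS capability bits (per be_cmds.h). */
#define RSS_ENABLE_IPV4      0x1
#define RSS_ENABLE_TCP_IPV4  0x2
/* ethtool hash-field flags, as in include/uapi/linux/ethtool.h. */
#define RXH_IP_SRC   (1 << 4)
#define RXH_IP_DST   (1 << 5)
#define RXH_L4_B_0_1 (1 << 6)
#define RXH_L4_B_2_3 (1 << 7)

/* Each device capability bit expands to the pair of ethtool field
 * flags it implies: L3 hashing covers src/dst IP, L4 hashing covers
 * the port bytes. */
static uint64_t tcp_v4_hash_opts(uint64_t rss_flags)
{
	uint64_t data = 0;

	if (rss_flags & RSS_ENABLE_IPV4)
		data |= RXH_IP_DST | RXH_IP_SRC;
	if (rss_flags & RSS_ENABLE_TCP_IPV4)
		data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	return data;
}

int main(void)
{
	printf("TCP/IPv4 hash opts: %#llx\n",
	       (unsigned long long)tcp_v4_hash_opts(RSS_ENABLE_IPV4 |
						    RSS_ENABLE_TCP_IPV4));
	return 0;
}
```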
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 654e7820daa0..43d5c1e29fc7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -353,9 +353,9 @@ static void populate_be_v0_stats(struct be_adapter *adapter)
 	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
 	drvs->rx_dropped_header_too_small =
 		port_stats->rx_dropped_header_too_small;
-	drvs->rx_address_mismatch_drops =
-		port_stats->rx_address_mismatch_drops +
-		port_stats->rx_vlan_mismatch_drops;
+	drvs->rx_address_filtered =
+		port_stats->rx_address_filtered +
+		port_stats->rx_vlan_filtered;
 	drvs->rx_alignment_symbol_errors =
 		port_stats->rx_alignment_symbol_errors;
 
@@ -404,7 +404,7 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
 		port_stats->rx_dropped_header_too_small;
 	drvs->rx_input_fifo_overflow_drop =
 		port_stats->rx_input_fifo_overflow_drop;
-	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
+	drvs->rx_address_filtered = port_stats->rx_address_filtered;
 	drvs->rx_alignment_symbol_errors =
 		port_stats->rx_alignment_symbol_errors;
 	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
@@ -445,9 +445,9 @@ static void populate_lancer_stats(struct be_adapter *adapter)
 	drvs->rx_dropped_header_too_small =
 		pport_stats->rx_dropped_header_too_small;
 	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
-	drvs->rx_address_mismatch_drops =
-		pport_stats->rx_address_mismatch_drops +
-		pport_stats->rx_vlan_mismatch_drops;
+	drvs->rx_address_filtered =
+		pport_stats->rx_address_filtered +
+		pport_stats->rx_vlan_filtered;
 	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
 	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
 	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
@@ -2510,9 +2510,19 @@ static int be_rx_qs_create(struct be_adapter *adapter)
 				rsstable[j + i] = rxo->rss_id;
 			}
 		}
-		rc = be_cmd_rss_config(adapter, rsstable, 128);
-		if (rc)
+		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+
+		if (!BEx_chip(adapter))
+			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+						RSS_ENABLE_UDP_IPV6;
+
+		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
+				       128);
+		if (rc) {
+			adapter->rss_flags = 0;
 			return rc;
+		}
 	}
 
 	/* First time posting */
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index eb4372962839..d44f65bac1d4 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -52,6 +52,7 @@
 #define FEC_R_FIFO_RSEM		0x194 /* Receive FIFO section empty threshold */
 #define FEC_R_FIFO_RAEM		0x198 /* Receive FIFO almost empty threshold */
 #define FEC_R_FIFO_RAFL		0x19c /* Receive FIFO almost full threshold */
+#define FEC_RACC		0x1C4 /* Receive Accelerator function */
 #define FEC_MIIGSK_CFGR		0x300 /* MIIGSK Configuration reg */
 #define FEC_MIIGSK_ENR		0x308 /* MIIGSK Enable reg */
 
@@ -164,9 +165,11 @@ struct bufdesc_ex {
 #define BD_ENET_TX_CSL		((ushort)0x0001)
 #define BD_ENET_TX_STATS	((ushort)0x03ff)	/* All status bits */
 
-/*enhanced buffer desciptor control/status used by Ethernet transmit*/
+/*enhanced buffer descriptor control/status used by Ethernet transmit*/
 #define BD_ENET_TX_INT		0x40000000
 #define BD_ENET_TX_TS		0x20000000
+#define BD_ENET_TX_PINS		0x10000000
+#define BD_ENET_TX_IINS		0x08000000
 
 
 /* This device has up to three irqs on some platforms */
@@ -190,6 +193,10 @@ struct bufdesc_ex {
 
 #define BD_ENET_RX_INT		0x00800000
 #define BD_ENET_RX_PTP		((ushort)0x0400)
+#define BD_ENET_RX_ICE		0x00000020
+#define BD_ENET_RX_PCR		0x00000010
+#define FLAG_RX_CSUM_ENABLED	(BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+#define FLAG_RX_CSUM_ERROR	(BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 
 /* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
  * tx_bd_base always point to the base of the buffer descriptors.  The
@@ -247,6 +254,7 @@ struct fec_enet_private {
 	int	pause_flag;
 
 	struct	napi_struct napi;
+	int	csum_flags;
 
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info ptp_caps;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2451ab1b5a83..b9748f14ea78 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -34,6 +34,12 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
@@ -176,6 +182,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #define PKT_MINBUF_SIZE		64
 #define PKT_MAXBLR_SIZE		1520
 
+/* FEC receive acceleration */
+#define FEC_RACC_IPDIS		(1 << 1)
+#define FEC_RACC_PRODIS		(1 << 2)
+#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
+
 /*
  * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
  * size bits. Other FEC hardware does not, so we need to take that into
181 * size bits. Other FEC hardware does not, so we need to take that into 192 * size bits. Other FEC hardware does not, so we need to take that into
@@ -236,6 +247,21 @@ static void *swap_buffer(void *bufaddr, int len)
 	return bufaddr;
 }
 
+static int
+fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
+{
+	/* Only run for packets requiring a checksum. */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (unlikely(skb_cow_head(skb, 0)))
+		return -1;
+
+	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
+
+	return 0;
+}
+
 static netdev_tx_t
 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
241{ 267{
@@ -248,7 +274,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	unsigned int index;
 
 	if (!fep->link) {
-		/* Link is down or autonegotiation is in progress. */
+		/* Link is down or auto-negotiation is in progress. */
 		return NETDEV_TX_BUSY;
 	}
 
254 280
@@ -265,6 +291,12 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_BUSY;
 	}
 
+	/* Protocol checksum off-load for TCP and UDP. */
+	if (fec_enet_clear_csum(skb, ndev)) {
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
 	/* Clear all of the status flags */
 	status &= ~BD_ENET_TX_STATS;
 
269 status &= ~BD_ENET_TX_STATS; 301 status &= ~BD_ENET_TX_STATS;
270 302
@@ -321,8 +353,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		} else {
-
 			ebdp->cbd_esc = BD_ENET_TX_INT;
+
+			/* Enable protocol checksum flags
+			 * We do not bother with the IP Checksum bits as they
+			 * are done by the kernel
+			 */
+			if (skb->ip_summed == CHECKSUM_PARTIAL)
+				ebdp->cbd_esc |= BD_ENET_TX_PINS;
 		}
 	}
 	/* If this was the last BD in the ring, start at the beginning again. */
@@ -402,6 +440,7 @@ fec_restart(struct net_device *ndev, int duplex)
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
 	int i;
+	u32 val;
 	u32 temp_mac[2];
 	u32 rcntl = OPT_FRAME_SIZE | 0x04;
 	u32 ecntl = 0x2; /* ETHEREN */
@@ -468,6 +507,14 @@ fec_restart(struct net_device *ndev, int duplex)
 	/* Set MII speed */
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
+	/* set RX checksum */
+	val = readl(fep->hwp + FEC_RACC);
+	if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+		val |= FEC_RACC_OPTIONS;
+	else
+		val &= ~FEC_RACC_OPTIONS;
+	writel(val, fep->hwp + FEC_RACC);
+
 	/*
 	 * The phy interface and speed need to get configured
 	 * differently on enet-mac.
@@ -525,7 +572,7 @@ fec_restart(struct net_device *ndev, int duplex)
 			fep->phy_dev && fep->phy_dev->pause)) {
 		rcntl |= FEC_ENET_FCE;
 
-		/* set FIFO thresh hold parameter to reduce overrun */
+		/* set FIFO threshold parameter to reduce overrun */
 		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
 		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
 		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
@@ -813,6 +860,18 @@ fec_enet_rx(struct net_device *ndev, int budget)
 				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 			}
 
+			if (fep->bufdesc_ex &&
+				(fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
+				struct bufdesc_ex *ebdp =
+					(struct bufdesc_ex *)bdp;
+				if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+					/* don't check it */
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				} else {
+					skb_checksum_none_assert(skb);
+				}
+			}
+
 			if (!skb_defer_rx_timestamp(skb))
 				napi_gro_receive(&fep->napi, skb);
 		}
@@ -1614,6 +1673,33 @@ static void fec_poll_controller(struct net_device *dev)
 }
 #endif
 
+static int fec_set_features(struct net_device *netdev,
+	netdev_features_t features)
+{
+	struct fec_enet_private *fep = netdev_priv(netdev);
+	netdev_features_t changed = features ^ netdev->features;
+
+	netdev->features = features;
+
+	/* Receive checksum has been changed */
+	if (changed & NETIF_F_RXCSUM) {
+		if (features & NETIF_F_RXCSUM)
+			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+		else
+			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
+
+		if (netif_running(netdev)) {
+			fec_stop(netdev);
+			fec_restart(netdev, fep->phy_dev->duplex);
+			netif_wake_queue(netdev);
+		} else {
+			fec_restart(netdev, fep->phy_dev->duplex);
+		}
+	}
+
+	return 0;
+}
+
 static const struct net_device_ops fec_netdev_ops = {
 	.ndo_open		= fec_enet_open,
 	.ndo_stop		= fec_enet_close,
1618 .ndo_open = fec_enet_open, 1704 .ndo_open = fec_enet_open,
1619 .ndo_stop = fec_enet_close, 1705 .ndo_stop = fec_enet_close,
@@ -1627,6 +1713,7 @@ static const struct net_device_ops fec_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= fec_poll_controller,
 #endif
+	.ndo_set_features	= fec_set_features,
 };
 
  /*
@@ -1668,6 +1755,13 @@ static int fec_enet_init(struct net_device *ndev)
 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
 	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
 
+	/* enable hw accelerator */
+	ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+			| NETIF_F_RXCSUM);
+	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+			| NETIF_F_RXCSUM);
+	fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+
 	fec_restart(ndev, 0);
 
 	return 0;
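Why fec_enet_clear_csum (added earlier in this file) zeroes the field at csum_start + csum_offset: with CHECKSUM_PARTIAL the stack leaves the final checksum to the device, and the FEC expects the destination field pre-cleared before it inserts the computed sum. A minimal userspace sketch of locating and clearing that field, using a hypothetical fake_skb stand-in for the real sk_buff:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy model of the two skb fields used: csum_start is where hardware
 * checksumming begins, csum_offset locates the 16-bit checksum slot
 * relative to that start. */
struct fake_skb {
	uint8_t  data[64];
	uint16_t csum_start;
	uint16_t csum_offset;
};

static void clear_csum(struct fake_skb *skb)
{
	uint16_t *csum_field =
		(uint16_t *)(skb->data + skb->csum_start + skb->csum_offset);
	*csum_field = 0;	/* hardware will fill in the real sum */
}

int main(void)
{
	/* TCP over IPv4: 14-byte Ethernet header + 20-byte IP header,
	 * TCP checksum sits 16 bytes into the TCP header. */
	struct fake_skb skb = { .csum_start = 14 + 20, .csum_offset = 16 };

	memset(skb.data, 0xff, sizeof(skb.data));
	clear_csum(&skb);
	printf("checksum field after clear: %#x\n",
	       *(uint16_t *)(skb.data + 14 + 20 + 16));
	return 0;
}
```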
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 05267d716e86..1df56cc50ee9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1490,6 +1490,69 @@ out:
 	return ret;
 }
 
+static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
+{
+	int port, err;
+	struct mlx4_vport_state *vp_admin;
+	struct mlx4_vport_oper_state *vp_oper;
+
+	for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+		vp_oper->state = *vp_admin;
+		if (MLX4_VGT != vp_admin->default_vlan) {
+			err = __mlx4_register_vlan(&priv->dev, port,
+						   vp_admin->default_vlan, &(vp_oper->vlan_idx));
+			if (err) {
+				vp_oper->vlan_idx = NO_INDX;
+				mlx4_warn((&priv->dev),
+					  "No vlan resorces slave %d, port %d\n",
+					  slave, port);
+				return err;
+			}
+			mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n",
+				 (int)(vp_oper->state.default_vlan),
+				 vp_oper->vlan_idx, slave, port);
+		}
+		if (vp_admin->spoofchk) {
+			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
+							       port,
+							       vp_admin->mac);
+			if (0 > vp_oper->mac_idx) {
+				err = vp_oper->mac_idx;
+				vp_oper->mac_idx = NO_INDX;
+				mlx4_warn((&priv->dev),
+					  "No mac resorces slave %d, port %d\n",
+					  slave, port);
+				return err;
+			}
+			mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n",
+				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
+		}
+	}
+	return 0;
+}
+
+static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
+{
+	int port;
+	struct mlx4_vport_oper_state *vp_oper;
+
+	for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+		if (NO_INDX != vp_oper->vlan_idx) {
+			__mlx4_unregister_vlan(&priv->dev,
+					       port, vp_oper->vlan_idx);
+			vp_oper->vlan_idx = NO_INDX;
+		}
+		if (NO_INDX != vp_oper->mac_idx) {
+			__mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx);
+			vp_oper->mac_idx = NO_INDX;
+		}
+	}
+	return;
+}
+
 static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 			       u16 param, u8 toggle)
 {
@@ -1510,6 +1573,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 	if (cmd == MLX4_COMM_CMD_RESET) {
 		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
 		slave_state[slave].active = false;
+		mlx4_master_deactivate_admin_state(priv, slave);
 		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
 				slave_state[slave].event_eq[i].eqn = -1;
 				slave_state[slave].event_eq[i].token = 0;
@@ -1556,6 +1620,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
 			goto reset_slave;
 		slave_state[slave].vhcr_dma |= param;
+		if (mlx4_master_activate_admin_state(priv, slave))
+			goto reset_slave;
 		slave_state[slave].active = true;
 		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
 		break;
@@ -1732,6 +1798,18 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 		if (!priv->mfunc.master.slave_state)
 			goto err_comm;
 
+		priv->mfunc.master.vf_admin =
+			kzalloc(dev->num_slaves *
+				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
+		if (!priv->mfunc.master.vf_admin)
+			goto err_comm_admin;
+
+		priv->mfunc.master.vf_oper =
+			kzalloc(dev->num_slaves *
+				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
+		if (!priv->mfunc.master.vf_oper)
+			goto err_comm_oper;
+
 		for (i = 0; i < dev->num_slaves; ++i) {
 			s_state = &priv->mfunc.master.slave_state[i];
 			s_state->last_cmd = MLX4_COMM_CMD_RESET;
@@ -1752,6 +1830,10 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 					goto err_slaves;
 				}
 				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+				priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
+				priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
+				priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
+				priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
 			}
 			spin_lock_init(&s_state->lock);
 		}
@@ -1800,6 +1882,10 @@ err_slaves:
 		for (port = 1; port <= MLX4_MAX_PORTS; port++)
 			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
 	}
+	kfree(priv->mfunc.master.vf_oper);
+err_comm_oper:
+	kfree(priv->mfunc.master.vf_admin);
+err_comm_admin:
 	kfree(priv->mfunc.master.slave_state);
 err_comm:
 	iounmap(priv->mfunc.comm);
@@ -1874,6 +1960,8 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
 			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
 		}
 		kfree(priv->mfunc.master.slave_state);
+		kfree(priv->mfunc.master.vf_admin);
+		kfree(priv->mfunc.master.vf_oper);
 	}
 
 	iounmap(priv->mfunc.comm);
@@ -1984,3 +2072,115 @@ u32 mlx4_comm_get_version(void)
 {
 	return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
 }
+
+static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
+{
+	if ((vf < 0) || (vf >= dev->num_vfs)) {
+		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs);
+		return -EINVAL;
+	}
+
+	return vf+1;
+}
+
+int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_vport_state *s_info;
+	int slave;
+
+	if (!mlx4_is_master(dev))
+		return -EPROTONOSUPPORT;
+
+	slave = mlx4_get_slave_indx(dev, vf);
+	if (slave < 0)
+		return -EINVAL;
+
+	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
+	s_info->mac = mac;
+	mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
+		  vf, port, s_info->mac);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
+
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_vport_state *s_info;
+	int slave;
+
+	if ((!mlx4_is_master(dev)) ||
+	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
+		return -EPROTONOSUPPORT;
+
+	if ((vlan > 4095) || (qos > 7))
+		return -EINVAL;
+
+	slave = mlx4_get_slave_indx(dev, vf);
+	if (slave < 0)
+		return -EINVAL;
+
+	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
+	if ((0 == vlan) && (0 == qos))
+		s_info->default_vlan = MLX4_VGT;
+	else
+		s_info->default_vlan = vlan;
+	s_info->default_qos = qos;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
+
+int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_vport_state *s_info;
+	int slave;
+
+	if ((!mlx4_is_master(dev)) ||
+	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
+		return -EPROTONOSUPPORT;
+
+	slave = mlx4_get_slave_indx(dev, vf);
+	if (slave < 0)
+		return -EINVAL;
+
+	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
+	s_info->spoofchk = setting;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
+
+int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_vport_state *s_info;
+	int slave;
+
+	if (!mlx4_is_master(dev))
+		return -EPROTONOSUPPORT;
+
+	slave = mlx4_get_slave_indx(dev, vf);
+	if (slave < 0)
+		return -EINVAL;
+
+	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
+	ivf->vf = vf;
+
+	/* need to convert it to a func */
+	ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
+	ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
+	ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
+	ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
+	ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
+	ivf->mac[5] = ((s_info->mac) & 0xff);
+
+	ivf->vlan = s_info->default_vlan;
+	ivf->qos = s_info->default_qos;
+	ivf->tx_rate = s_info->tx_rate;
+	ivf->spoofchk = s_info->spoofchk;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
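In mlx4_get_vf_config above, the VF MAC is stored as the low 48 bits of a u64 with the most significant octet first, so shifting by 8*(5-i) peels the octets off in wire order. A standalone C sketch of that unpacking (hypothetical helper name, same shift pattern as the code above):

```c
#include <stdint.h>
#include <stdio.h>

/* Unpack a MAC kept in the low 48 bits of a u64, MSB octet first,
 * into the 6-byte array format used by ifla_vf_info. */
static void mac_u64_to_bytes(uint64_t mac, uint8_t out[6])
{
	int i;

	for (i = 0; i < 6; i++)
		out[i] = (mac >> (8 * (5 - i))) & 0xff;
}

int main(void)
{
	uint8_t mac[6];

	mac_u64_to_bytes(0x001122334455ULL, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
```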
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f4f88b846020..a69a908614e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1373,7 +1373,8 @@ static void mlx4_en_service_task(struct work_struct *work)
 
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up) {
-		mlx4_en_ptp_overflow_check(mdev);
+		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+			mlx4_en_ptp_overflow_check(mdev);
 
 		queue_delayed_work(mdev->workqueue, &priv->service_task,
 				   SERVICE_TASK_DELAY);
@@ -2023,6 +2024,42 @@ static int mlx4_en_set_features(struct net_device *netdev,
 
 }
 
+static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+{
+	struct mlx4_en_priv *en_priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = en_priv->mdev;
+	u64 mac_u64 = mlx4_en_mac_to_u64(mac);
+
+	if (!is_valid_ether_addr(mac))
+		return -EINVAL;
+
+	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
+}
+
+static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
+{
+	struct mlx4_en_priv *en_priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = en_priv->mdev;
+
+	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
+}
+
+static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+{
+	struct mlx4_en_priv *en_priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = en_priv->mdev;
+
+	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
+}
+
+static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
+{
+	struct mlx4_en_priv *en_priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = en_priv->mdev;
+
+	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
+}
+
 static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_open		= mlx4_en_open,
 	.ndo_stop		= mlx4_en_close,
2026static const struct net_device_ops mlx4_netdev_ops = { 2063static const struct net_device_ops mlx4_netdev_ops = {
2027 .ndo_open = mlx4_en_open, 2064 .ndo_open = mlx4_en_open,
2028 .ndo_stop = mlx4_en_close, 2065 .ndo_stop = mlx4_en_close,
@@ -2047,6 +2084,33 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #endif
 };
 
+static const struct net_device_ops mlx4_netdev_ops_master = {
+	.ndo_open		= mlx4_en_open,
+	.ndo_stop		= mlx4_en_close,
+	.ndo_start_xmit		= mlx4_en_xmit,
+	.ndo_select_queue	= mlx4_en_select_queue,
+	.ndo_get_stats		= mlx4_en_get_stats,
+	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
+	.ndo_set_mac_address	= mlx4_en_set_mac,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_change_mtu		= mlx4_en_change_mtu,
+	.ndo_tx_timeout		= mlx4_en_tx_timeout,
+	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
+	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
+	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
+	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
+	.ndo_get_vf_config	= mlx4_en_get_vf_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= mlx4_en_netpoll,
+#endif
+	.ndo_set_features	= mlx4_en_set_features,
+	.ndo_setup_tc		= mlx4_en_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
+#endif
+};
+
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 			struct mlx4_en_port_profile *prof)
 {
2050int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 2114int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2051 struct mlx4_en_port_profile *prof) 2115 struct mlx4_en_port_profile *prof)
2052{ 2116{
@@ -2163,7 +2227,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	/*
 	 * Initialize netdev entry points
 	 */
-	dev->netdev_ops = &mlx4_netdev_ops;
+	if (mlx4_is_master(priv->mdev->dev))
+		dev->netdev_ops = &mlx4_netdev_ops_master;
+	else
+		dev->netdev_ops = &mlx4_netdev_ops;
 	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
 	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -2228,8 +2295,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	}
 	mlx4_en_set_default_moderation(priv);
 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
-	queue_delayed_work(mdev->workqueue, &priv->service_task,
-			   SERVICE_TASK_DELAY);
+
+	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+		queue_delayed_work(mdev->workqueue, &priv->service_task,
+				   SERVICE_TASK_DELAY);
+
 	return 0;
 
 out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 6776c257bd34..b147bdd40768 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -468,6 +468,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
 #define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
+#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
@@ -655,6 +656,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	MLX4_GET(dev_cap->max_counters, outbox,
 		 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
 
+	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
+	if (field32 & (1 << 26))
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
+	if (field32 & (1 << 20))
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
+
 	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
 		for (i = 1; i <= dev_cap->num_ports; ++i) {
 			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
@@ -784,6 +791,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
 	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
 
+	/* For guests, disable timestamp */
+	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
+	field &= 0x7f;
+	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
+
 	/* For guests, report Blueflame disabled */
 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
 	field &= 0x7f;
789 field &= 0x7f; 801 field &= 0x7f;
@@ -811,6 +823,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			   struct mlx4_cmd_mailbox *outbox,
 			   struct mlx4_cmd_info *cmd)
 {
+	struct mlx4_priv *priv = mlx4_priv(dev);
 	u64 def_mac;
 	u8 port_type;
 	u16 short_field;
@@ -828,6 +841,9 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 		/* set slave default_mac address */
 		MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
 		def_mac += slave << 8;
+		/* if config MAC in DB use it */
+		if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
+			def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
 		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
 
 		/* get port type - currently only eth is enabled */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 0567f01938ed..eac3dae10efe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -473,6 +473,30 @@ struct mlx4_slave_state {
 	enum slave_port_state port_state[MLX4_MAX_PORTS + 1];
 };
 
+#define MLX4_VGT 4095
+#define NO_INDX  (-1)
+
+struct mlx4_vport_state {
+	u64 mac;
+	u16 default_vlan;
+	u8  default_qos;
+	u32 tx_rate;
+	bool spoofchk;
+};
+
+struct mlx4_vf_admin_state {
+	struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1];
+};
+
+struct mlx4_vport_oper_state {
+	struct mlx4_vport_state state;
+	int mac_idx;
+	int vlan_idx;
+};
+struct mlx4_vf_oper_state {
+	struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1];
+};
+
 struct slave_list {
 	struct mutex mutex;
 	struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
477 struct mutex mutex; 501 struct mutex mutex;
478 struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; 502 struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
@@ -503,6 +527,8 @@ struct mlx4_master_qp0_state {
 
 struct mlx4_mfunc_master_ctx {
 	struct mlx4_slave_state *slave_state;
+	struct mlx4_vf_admin_state *vf_admin;
+	struct mlx4_vf_oper_state *vf_oper;
 	struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
 	int			init_port_ref[MLX4_MAX_PORTS + 1];
 	u16			max_mtu[MLX4_MAX_PORTS + 1];
@@ -1131,6 +1157,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
 
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
+void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
 /* resource tracker functions*/
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 4b6aad39e72c..946e0af5faef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -141,8 +141,9 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 		}
 
 		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
-			/* MAC already registered, Must not have duplicates */
-			err = -EEXIST;
+			/* MAC already registered, increment ref count */
+			err = i;
+			++table->refs[i];
 			goto out;
 		}
 	}
@@ -165,7 +166,7 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 		table->entries[free] = 0;
 		goto out;
 	}
-
+	table->refs[free] = 1;
 	err = free;
 	++table->total;
 out:
@@ -206,12 +207,16 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 	struct mlx4_mac_table *table = &info->mac_table;
 	int index;
 
-	index = find_index(dev, table, mac);
-
 	mutex_lock(&table->mutex);
+	index = find_index(dev, table, mac);
 
 	if (validate_index(dev, table, index))
 		goto out;
+	if (--table->refs[index]) {
+		mlx4_dbg(dev, "Have more references for index %d,"
+			 "no need to modify mac table\n", index);
+		goto out;
+	}
 
 	table->entries[index] = 0;
 	mlx4_set_port_mac_table(dev, port, table->entries);
@@ -305,7 +310,7 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
 }
 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
 
-static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
+int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
 				int *index)
 {
 	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -379,7 +384,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
379} 384}
380EXPORT_SYMBOL_GPL(mlx4_register_vlan); 385EXPORT_SYMBOL_GPL(mlx4_register_vlan);
381 386
382static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) 387void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
383{ 388{
384 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 389 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
385 390
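The port.c hunks above change the MAC table from fail-on-duplicate to reference counting: a second registration of the same address now bumps a per-slot refcount and returns the existing index, and unregistration only clears the hardware entry once the last reference drops (note also that find_index() is now called under table->mutex, closing a lookup/modify race). A minimal sketch of the pattern, with illustrative names and the firmware mirroring and locking elided:

	struct mac_slot { u64 addr; int refs; };

	static int mac_register(struct mac_slot *tbl, int n, u64 mac)
	{
		int i, free = -1;

		for (i = 0; i < n; i++) {
			if (!tbl[i].refs) {
				if (free < 0)
					free = i;	/* remember first empty slot */
			} else if (tbl[i].addr == mac) {
				++tbl[i].refs;		/* duplicate: share the slot */
				return i;
			}
		}
		if (free < 0)
			return -ENOSPC;			/* table full */
		tbl[free].addr = mac;
		tbl[free].refs = 1;			/* first user of this slot */
		return free;
	}

	static void mac_unregister(struct mac_slot *tbl, int idx)
	{
		if (--tbl[idx].refs)
			return;		/* other users remain; keep the entry */
		tbl[idx].addr = 0;	/* last reference gone: free the slot */
	}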
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index f2d64435d8ef..e12e0d2e0ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -353,6 +353,47 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
353 } 353 }
354} 354}
355 355
356static int update_vport_qp_param(struct mlx4_dev *dev,
357 struct mlx4_cmd_mailbox *inbox,
358 u8 slave)
359{
360 struct mlx4_qp_context *qpc = inbox->buf + 8;
361 struct mlx4_vport_oper_state *vp_oper;
362 struct mlx4_priv *priv;
363 u32 qp_type;
364 int port;
365
366 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
367 priv = mlx4_priv(dev);
368 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
369
370 if (MLX4_VGT != vp_oper->state.default_vlan) {
371 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
372 if (MLX4_QP_ST_RC == qp_type)
373 return -EINVAL;
374
375 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
376 qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit */
377 qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
378 qpc->pri_path.sched_queue &= 0xC7;
379 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
380 mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
381 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
382 (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
383 vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
384 (int)(qpc->pri_path.fl));
385 }
386 if (vp_oper->state.spoofchk) {
387 qpc->pri_path.feup |= 1 << 5; /* set fsm bit */
388 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
389 mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n",
390 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
391 (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
392 vp_oper->mac_idx);
393 }
394 return 0;
395}
396
356static int mpt_mask(struct mlx4_dev *dev) 397static int mpt_mask(struct mlx4_dev *dev)
357{ 398{
358 return dev->caps.num_mpts - 1; 399 return dev->caps.num_mpts - 1;
@@ -2798,6 +2839,9 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2798 update_pkey_index(dev, slave, inbox); 2839 update_pkey_index(dev, slave, inbox);
2799 update_gid(dev, inbox, (u8)slave); 2840 update_gid(dev, inbox, (u8)slave);
2800 adjust_proxy_tun_qkey(dev, vhcr, qpc); 2841 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2842 err = update_vport_qp_param(dev, inbox, slave);
2843 if (err)
2844 return err;
2801 2845
2802 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2846 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2803} 2847}
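update_vport_qp_param() is the enforcement point for the new VF administration: when a VF's admin VLAN is not MLX4_VGT, the PF rewrites the slave's QP context during INIT2RTR so the VLAN, QoS, and (optionally) spoof-check settings are applied in hardware regardless of what the guest requested, and RC QPs are rejected outright since they cannot carry the forced VLAN. The bit fields it manipulates, as far as the hunk shows (the symbolic names below are descriptive, not taken from an mlx4 header):

	/* sched_queue: bit 6 selects port 2, bits 5..3 carry the QoS value */
	qpc->pri_path.sched_queue = (qpc->pri_path.sched_queue & 0xC7) |
				    (vp_oper->state.default_qos << 3);
	/* fl:   bit 6 = cv (insert VLAN), bit 2 = hide_cqe_vlan            */
	/* feup: bit 3 = fvl (force VLAN), bit 5 = fsm (force source MAC)   */
	/* grh_mylmc: bit 7 preserved, low bits = MAC-table index           */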
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 25c364209a21..4e2d224dd680 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -348,6 +348,7 @@ struct cpsw_priv {
348 /* snapshot of IRQ numbers */ 348 /* snapshot of IRQ numbers */
349 u32 irqs_table[4]; 349 u32 irqs_table[4];
350 u32 num_irqs; 350 u32 num_irqs;
351 bool irq_enabled;
351 struct cpts *cpts; 352 struct cpts *cpts;
352 u32 emac_port; 353 u32 emac_port;
353}; 354};
@@ -355,12 +356,15 @@ struct cpsw_priv {
355#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) 356#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
356#define for_each_slave(priv, func, arg...) \ 357#define for_each_slave(priv, func, arg...) \
357 do { \ 358 do { \
358 int idx; \ 359 struct cpsw_slave *slave; \
360 int n; \
359 if (priv->data.dual_emac) \ 361 if (priv->data.dual_emac) \
360 (func)((priv)->slaves + priv->emac_port, ##arg);\ 362 (func)((priv)->slaves + priv->emac_port, ##arg);\
361 else \ 363 else \
362 for (idx = 0; idx < (priv)->data.slaves; idx++) \ 364 for (n = (priv)->data.slaves, \
363 (func)((priv)->slaves + idx, ##arg); \ 365 slave = (priv)->slaves; \
366 n; n--) \
367 (func)(slave++, ##arg); \
364 } while (0) 368 } while (0)
365#define cpsw_get_slave_ndev(priv, __slave_no__) \ 369#define cpsw_get_slave_ndev(priv, __slave_no__) \
366 (priv->slaves[__slave_no__].ndev) 370 (priv->slaves[__slave_no__].ndev)
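The for_each_slave() rewrite above is behaviour-neutral: it swaps the indexed loop for a walking pointer, so each iteration costs a pointer increment rather than a recomputed slaves + idx. In isolation the two forms look like:

	/* index form (old): element address recomputed each pass */
	for (idx = 0; idx < n; idx++)
		func(&slaves[idx]);

	/* pointer form (new): count down, walk the array */
	slave = slaves;
	for (n_left = n; n_left; n_left--)
		func(slave++);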
@@ -468,62 +472,69 @@ void cpsw_tx_handler(void *token, int len, int status)
468void cpsw_rx_handler(void *token, int len, int status) 472void cpsw_rx_handler(void *token, int len, int status)
469{ 473{
470 struct sk_buff *skb = token; 474 struct sk_buff *skb = token;
475 struct sk_buff *new_skb;
471 struct net_device *ndev = skb->dev; 476 struct net_device *ndev = skb->dev;
472 struct cpsw_priv *priv = netdev_priv(ndev); 477 struct cpsw_priv *priv = netdev_priv(ndev);
473 int ret = 0; 478 int ret = 0;
474 479
475 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); 480 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
476 481
477 /* free and bail if we are shutting down */ 482 if (unlikely(status < 0)) {
478 if (unlikely(!netif_running(ndev)) || 483 /* the interface is going down, skbs are purged */
479 unlikely(!netif_carrier_ok(ndev))) {
480 dev_kfree_skb_any(skb); 484 dev_kfree_skb_any(skb);
481 return; 485 return;
482 } 486 }
483 if (likely(status >= 0)) { 487
488 new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
489 if (new_skb) {
484 skb_put(skb, len); 490 skb_put(skb, len);
485 cpts_rx_timestamp(priv->cpts, skb); 491 cpts_rx_timestamp(priv->cpts, skb);
486 skb->protocol = eth_type_trans(skb, ndev); 492 skb->protocol = eth_type_trans(skb, ndev);
487 netif_receive_skb(skb); 493 netif_receive_skb(skb);
488 priv->stats.rx_bytes += len; 494 priv->stats.rx_bytes += len;
489 priv->stats.rx_packets++; 495 priv->stats.rx_packets++;
490 skb = NULL; 496 } else {
491 } 497 priv->stats.rx_dropped++;
492 498 new_skb = skb;
493 if (unlikely(!netif_running(ndev))) {
494 if (skb)
495 dev_kfree_skb_any(skb);
496 return;
497 } 499 }
498 500
499 if (likely(!skb)) { 501 ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
500 skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max); 502 skb_tailroom(new_skb), 0);
501 if (WARN_ON(!skb)) 503 if (WARN_ON(ret < 0))
502 return; 504 dev_kfree_skb_any(new_skb);
503
504 ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
505 skb_tailroom(skb), 0, GFP_KERNEL);
506 }
507 WARN_ON(ret < 0);
508} 505}
509 506
510static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 507static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
511{ 508{
512 struct cpsw_priv *priv = dev_id; 509 struct cpsw_priv *priv = dev_id;
510 u32 rx, tx, rx_thresh;
513 511
514 if (likely(netif_running(priv->ndev))) { 512 rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
515 cpsw_intr_disable(priv); 513 rx = __raw_readl(&priv->wr_regs->rx_stat);
514 tx = __raw_readl(&priv->wr_regs->tx_stat);
515 if (!rx_thresh && !rx && !tx)
516 return IRQ_NONE;
517
518 cpsw_intr_disable(priv);
519 if (priv->irq_enabled == true) {
516 cpsw_disable_irq(priv); 520 cpsw_disable_irq(priv);
521 priv->irq_enabled = false;
522 }
523
524 if (netif_running(priv->ndev)) {
517 napi_schedule(&priv->napi); 525 napi_schedule(&priv->napi);
518 } else { 526 return IRQ_HANDLED;
519 priv = cpsw_get_slave_priv(priv, 1);
520 if (likely(priv) && likely(netif_running(priv->ndev))) {
521 cpsw_intr_disable(priv);
522 cpsw_disable_irq(priv);
523 napi_schedule(&priv->napi);
524 }
525 } 527 }
526 return IRQ_HANDLED; 528
529 priv = cpsw_get_slave_priv(priv, 1);
530 if (!priv)
531 return IRQ_NONE;
532
533 if (netif_running(priv->ndev)) {
534 napi_schedule(&priv->napi);
535 return IRQ_HANDLED;
536 }
537 return IRQ_NONE;
527} 538}
528 539
529static int cpsw_poll(struct napi_struct *napi, int budget) 540static int cpsw_poll(struct napi_struct *napi, int budget)
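Two things change in cpsw_rx_handler() above. First, a negative status (see the cpdma teardown mapping further down) now means the channel is being torn down, so the skb is simply freed instead of testing netif_running()/netif_carrier_ok(). Second, the refill follows the usual allocate-before-consume RX pattern: the filled skb is handed to the stack only if a replacement buffer was secured; otherwise the packet is dropped and its own buffer recycled, so the DMA ring never loses descriptors under memory pressure. The skeleton of that pattern:

	new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
	if (new_skb) {
		skb_put(skb, len);		/* replacement secured:  */
		netif_receive_skb(skb);		/* pass the packet up    */
	} else {
		priv->stats.rx_dropped++;	/* no memory: drop, and  */
		new_skb = skb;			/* recycle the old skb   */
	}
	/* either way exactly one buffer returns to the RX ring */
	cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
			  skb_tailroom(new_skb), 0);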
@@ -537,10 +548,16 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
537 548
538 num_rx = cpdma_chan_process(priv->rxch, budget); 549 num_rx = cpdma_chan_process(priv->rxch, budget);
539 if (num_rx < budget) { 550 if (num_rx < budget) {
551 struct cpsw_priv *prim_cpsw;
552
540 napi_complete(napi); 553 napi_complete(napi);
541 cpsw_intr_enable(priv); 554 cpsw_intr_enable(priv);
542 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 555 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
543 cpsw_enable_irq(priv); 556 prim_cpsw = cpsw_get_slave_priv(priv, 0);
557 if (prim_cpsw->irq_enabled == false) {
558 cpsw_enable_irq(priv);
559 prim_cpsw->irq_enabled = true;
560 }
544 } 561 }
545 562
546 if (num_rx || num_tx) 563 if (num_rx || num_tx)
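The irq_enabled flag threaded through cpsw_interrupt() and cpsw_poll() exists because the two dual-EMAC net devices share one set of interrupt lines: the hard-IRQ handler must mask them exactly once (and report IRQ_NONE when no status bit is pending, now that the line can be shared), and whichever instance completes NAPI polling unmasks them exactly once through the primary slave's flag. The handshake, assuming prim is the priv of slave 0:

	/* hard IRQ: mask once, remember having done so */
	cpsw_intr_disable(priv);
	if (priv->irq_enabled) {
		cpsw_disable_irq(priv);
		priv->irq_enabled = false;
	}
	napi_schedule(&priv->napi);

	/* NAPI completion: unmask once, via the primary instance */
	prim = cpsw_get_slave_priv(priv, 0);
	if (!prim->irq_enabled) {
		cpsw_enable_irq(priv);
		prim->irq_enabled = true;
	}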
@@ -736,14 +753,14 @@ static inline int cpsw_tx_packet_submit(struct net_device *ndev,
736{ 753{
737 if (!priv->data.dual_emac) 754 if (!priv->data.dual_emac)
738 return cpdma_chan_submit(priv->txch, skb, skb->data, 755 return cpdma_chan_submit(priv->txch, skb, skb->data,
739 skb->len, 0, GFP_KERNEL); 756 skb->len, 0);
740 757
741 if (ndev == cpsw_get_slave_ndev(priv, 0)) 758 if (ndev == cpsw_get_slave_ndev(priv, 0))
742 return cpdma_chan_submit(priv->txch, skb, skb->data, 759 return cpdma_chan_submit(priv->txch, skb, skb->data,
743 skb->len, 1, GFP_KERNEL); 760 skb->len, 1);
744 else 761 else
745 return cpdma_chan_submit(priv->txch, skb, skb->data, 762 return cpdma_chan_submit(priv->txch, skb, skb->data,
746 skb->len, 2, GFP_KERNEL); 763 skb->len, 2);
747} 764}
748 765
749static inline void cpsw_add_dual_emac_def_ale_entries( 766static inline void cpsw_add_dual_emac_def_ale_entries(
@@ -867,9 +884,19 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
867 } 884 }
868} 885}
869 886
887static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
888{
889 if (!slave->phy)
890 return;
891 phy_stop(slave->phy);
892 phy_disconnect(slave->phy);
893 slave->phy = NULL;
894}
895
870static int cpsw_ndo_open(struct net_device *ndev) 896static int cpsw_ndo_open(struct net_device *ndev)
871{ 897{
872 struct cpsw_priv *priv = netdev_priv(ndev); 898 struct cpsw_priv *priv = netdev_priv(ndev);
899 struct cpsw_priv *prim_cpsw;
873 int i, ret; 900 int i, ret;
874 u32 reg; 901 u32 reg;
875 902
@@ -912,14 +939,16 @@ static int cpsw_ndo_open(struct net_device *ndev)
912 struct sk_buff *skb; 939 struct sk_buff *skb;
913 940
914 ret = -ENOMEM; 941 ret = -ENOMEM;
915 skb = netdev_alloc_skb_ip_align(priv->ndev, 942 skb = __netdev_alloc_skb_ip_align(priv->ndev,
916 priv->rx_packet_max); 943 priv->rx_packet_max, GFP_KERNEL);
917 if (!skb) 944 if (!skb)
918 break; 945 goto err_cleanup;
919 ret = cpdma_chan_submit(priv->rxch, skb, skb->data, 946 ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
920 skb_tailroom(skb), 0, GFP_KERNEL); 947 skb_tailroom(skb), 0);
921 if (WARN_ON(ret < 0)) 948 if (ret < 0) {
922 break; 949 kfree_skb(skb);
950 goto err_cleanup;
951 }
923 } 952 }
924 /* continue even if we didn't manage to submit all 953 /* continue even if we didn't manage to submit all
925 * receive descs 954 * receive descs
@@ -935,6 +964,14 @@ static int cpsw_ndo_open(struct net_device *ndev)
935 cpsw_set_coalesce(ndev, &coal); 964 cpsw_set_coalesce(ndev, &coal);
936 } 965 }
937 966
967 prim_cpsw = cpsw_get_slave_priv(priv, 0);
968 if (prim_cpsw->irq_enabled == false) {
969 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
970 prim_cpsw->irq_enabled = true;
971 cpsw_enable_irq(prim_cpsw);
972 }
973 }
974
938 cpdma_ctlr_start(priv->dma); 975 cpdma_ctlr_start(priv->dma);
939 cpsw_intr_enable(priv); 976 cpsw_intr_enable(priv);
940 napi_enable(&priv->napi); 977 napi_enable(&priv->napi);
@@ -944,15 +981,13 @@ static int cpsw_ndo_open(struct net_device *ndev)
944 if (priv->data.dual_emac) 981 if (priv->data.dual_emac)
945 priv->slaves[priv->emac_port].open_stat = true; 982 priv->slaves[priv->emac_port].open_stat = true;
946 return 0; 983 return 0;
947}
948 984
949static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) 985err_cleanup:
950{ 986 cpdma_ctlr_stop(priv->dma);
951 if (!slave->phy) 987 for_each_slave(priv, cpsw_slave_stop, priv);
952 return; 988 pm_runtime_put_sync(&priv->pdev->dev);
953 phy_stop(slave->phy); 989 netif_carrier_off(priv->ndev);
954 phy_disconnect(slave->phy); 990 return ret;
955 slave->phy = NULL;
956} 991}
957 992
958static int cpsw_ndo_stop(struct net_device *ndev) 993static int cpsw_ndo_stop(struct net_device *ndev)
@@ -1598,7 +1633,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1598 priv_sl2->irqs_table[i] = priv->irqs_table[i]; 1633 priv_sl2->irqs_table[i] = priv->irqs_table[i];
1599 priv_sl2->num_irqs = priv->num_irqs; 1634 priv_sl2->num_irqs = priv->num_irqs;
1600 } 1635 }
1601 1636 priv->irq_enabled = true;
1602 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1637 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1603 1638
1604 ndev->netdev_ops = &cpsw_netdev_ops; 1639 ndev->netdev_ops = &cpsw_netdev_ops;
@@ -1619,7 +1654,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1619 1654
1620static int cpsw_probe(struct platform_device *pdev) 1655static int cpsw_probe(struct platform_device *pdev)
1621{ 1656{
1622 struct cpsw_platform_data *data = pdev->dev.platform_data; 1657 struct cpsw_platform_data *data;
1623 struct net_device *ndev; 1658 struct net_device *ndev;
1624 struct cpsw_priv *priv; 1659 struct cpsw_priv *priv;
1625 struct cpdma_params dma_params; 1660 struct cpdma_params dma_params;
@@ -1832,7 +1867,7 @@ static int cpsw_probe(struct platform_device *pdev)
1832 goto clean_ale_ret; 1867 goto clean_ale_ret;
1833 } 1868 }
1834 priv->irqs_table[k] = i; 1869 priv->irqs_table[k] = i;
1835 priv->num_irqs = k; 1870 priv->num_irqs = k + 1;
1836 } 1871 }
1837 k++; 1872 k++;
1838 } 1873 }
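The k + 1 fix above is what makes the new cleanup paths correct: k is a zero-based index into irqs_table[], so after storing entry k the count of requested lines is k + 1. The old code stored k, under-counting by one, which would leak the last IRQ now that teardown walks the table instead of freeing the single ndev->irq:

	for (i = 0; i < priv->num_irqs; i++)
		free_irq(priv->irqs_table[i], priv);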
@@ -1870,7 +1905,8 @@ static int cpsw_probe(struct platform_device *pdev)
1870 return 0; 1905 return 0;
1871 1906
1872clean_irq_ret: 1907clean_irq_ret:
1873 free_irq(ndev->irq, priv); 1908 for (i = 0; i < priv->num_irqs; i++)
1909 free_irq(priv->irqs_table[i], priv);
1874clean_ale_ret: 1910clean_ale_ret:
1875 cpsw_ale_destroy(priv->ale); 1911 cpsw_ale_destroy(priv->ale);
1876clean_dma_ret: 1912clean_dma_ret:
@@ -1893,7 +1929,8 @@ clean_slave_ret:
1893 pm_runtime_disable(&pdev->dev); 1929 pm_runtime_disable(&pdev->dev);
1894 kfree(priv->slaves); 1930 kfree(priv->slaves);
1895clean_ndev_ret: 1931clean_ndev_ret:
1896 free_netdev(ndev); 1932 kfree(priv->data.slave_data);
1933 free_netdev(priv->ndev);
1897 return ret; 1934 return ret;
1898} 1935}
1899 1936
@@ -1901,12 +1938,17 @@ static int cpsw_remove(struct platform_device *pdev)
1901{ 1938{
1902 struct net_device *ndev = platform_get_drvdata(pdev); 1939 struct net_device *ndev = platform_get_drvdata(pdev);
1903 struct cpsw_priv *priv = netdev_priv(ndev); 1940 struct cpsw_priv *priv = netdev_priv(ndev);
1941 int i;
1904 1942
1905 pr_info("removing device");
1906 platform_set_drvdata(pdev, NULL); 1943 platform_set_drvdata(pdev, NULL);
1944 if (priv->data.dual_emac)
1945 unregister_netdev(cpsw_get_slave_ndev(priv, 1));
1946 unregister_netdev(ndev);
1907 1947
1908 cpts_unregister(priv->cpts); 1948 cpts_unregister(priv->cpts);
1909 free_irq(ndev->irq, priv); 1949 for (i = 0; i < priv->num_irqs; i++)
1950 free_irq(priv->irqs_table[i], priv);
1951
1910 cpsw_ale_destroy(priv->ale); 1952 cpsw_ale_destroy(priv->ale);
1911 cpdma_chan_destroy(priv->txch); 1953 cpdma_chan_destroy(priv->txch);
1912 cpdma_chan_destroy(priv->rxch); 1954 cpdma_chan_destroy(priv->rxch);
@@ -1920,8 +1962,10 @@ static int cpsw_remove(struct platform_device *pdev)
1920 pm_runtime_disable(&pdev->dev); 1962 pm_runtime_disable(&pdev->dev);
1921 clk_put(priv->clk); 1963 clk_put(priv->clk);
1922 kfree(priv->slaves); 1964 kfree(priv->slaves);
1965 kfree(priv->data.slave_data);
1966 if (priv->data.dual_emac)
1967 free_netdev(cpsw_get_slave_ndev(priv, 1));
1923 free_netdev(ndev); 1968 free_netdev(ndev);
1924
1925 return 0; 1969 return 0;
1926} 1970}
1927 1971
@@ -1957,6 +2001,7 @@ static const struct of_device_id cpsw_of_mtable[] = {
1957 { .compatible = "ti,cpsw", }, 2001 { .compatible = "ti,cpsw", },
1958 { /* sentinel */ }, 2002 { /* sentinel */ },
1959}; 2003};
2004MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
1960 2005
1961static struct platform_driver cpsw_driver = { 2006static struct platform_driver cpsw_driver = {
1962 .driver = { 2007 .driver = {
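MODULE_DEVICE_TABLE(of, ...) here (and the matching addition to davinci_mdio below) embeds the OF match table in the module's alias section, so userspace can autoload the driver when the matching device-tree node shows up; without it the module only binds if loaded by hand. The emitted alias is roughly of:N*T*Cti,cpsw, wildcarding node name and type and matching on the compatible string:

	static const struct of_device_id cpsw_of_mtable[] = {
		{ .compatible = "ti,cpsw", },
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(of, cpsw_of_mtable);	/* => modalias for modprobe */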
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index ee13dc78430c..49dfd592ac1e 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -20,6 +20,7 @@
20#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/delay.h>
23 24
24#include "davinci_cpdma.h" 25#include "davinci_cpdma.h"
25 26
@@ -312,14 +313,16 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
312 } 313 }
313 314
314 if (ctlr->params.has_soft_reset) { 315 if (ctlr->params.has_soft_reset) {
315 unsigned long timeout = jiffies + HZ/10; 316 unsigned timeout = 10 * 100;
316 317
317 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); 318 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
318 while (time_before(jiffies, timeout)) { 319 while (timeout) {
319 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) 320 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
320 break; 321 break;
322 udelay(10);
323 timeout--;
321 } 324 }
322 WARN_ON(!time_before(jiffies, timeout)); 325 WARN_ON(!timeout);
323 } 326 }
324 327
325 for (i = 0; i < ctlr->num_chan; i++) { 328 for (i = 0; i < ctlr->num_chan; i++) {
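Replacing the jiffies deadline with a counted udelay() loop makes the wait correct in contexts where jiffies does not advance (interrupts disabled, early init), at the cost of busy-waiting; this hunk and the teardown wait in cpdma_chan_stop() below both poll in 10 us steps. The general shape, where poll_done() stands in for whatever register test applies:

	unsigned timeout = 10 * 100;	/* 1000 polls x 10 us = 10 ms budget */

	while (timeout) {
		if (poll_done())	/* e.g. soft-reset bit cleared */
			break;
		udelay(10);		/* safe even when jiffies is frozen */
		timeout--;
	}
	WARN_ON(!timeout);		/* hardware never acknowledged */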
@@ -673,7 +676,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
673} 676}
674 677
675int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, 678int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
676 int len, int directed, gfp_t gfp_mask) 679 int len, int directed)
677{ 680{
678 struct cpdma_ctlr *ctlr = chan->ctlr; 681 struct cpdma_ctlr *ctlr = chan->ctlr;
679 struct cpdma_desc __iomem *desc; 682 struct cpdma_desc __iomem *desc;
@@ -773,6 +776,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
773 struct cpdma_ctlr *ctlr = chan->ctlr; 776 struct cpdma_ctlr *ctlr = chan->ctlr;
774 struct cpdma_desc __iomem *desc; 777 struct cpdma_desc __iomem *desc;
775 int status, outlen; 778 int status, outlen;
779 int cb_status = 0;
776 struct cpdma_desc_pool *pool = ctlr->pool; 780 struct cpdma_desc_pool *pool = ctlr->pool;
777 dma_addr_t desc_dma; 781 dma_addr_t desc_dma;
778 unsigned long flags; 782 unsigned long flags;
@@ -808,8 +812,12 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
808 } 812 }
809 813
810 spin_unlock_irqrestore(&chan->lock, flags); 814 spin_unlock_irqrestore(&chan->lock, flags);
815 if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
816 cb_status = -ENOSYS;
817 else
818 cb_status = status;
811 819
812 __cpdma_chan_free(chan, desc, outlen, status); 820 __cpdma_chan_free(chan, desc, outlen, cb_status);
813 return status; 821 return status;
814 822
815unlock_ret: 823unlock_ret:
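The cb_status mapping above is how a channel teardown reaches the user: descriptors reaped with CPDMA_DESC_TD_COMPLETE set are delivered to the completion callback as -ENOSYS instead of a byte count, which is precisely the status < 0 case the reworked cpsw_rx_handler() treats as "interface going down, purge the skb". The implied callback contract, paraphrased:

	void rx_complete(void *token, int len, int status)
	{
		struct sk_buff *skb = token;

		if (status < 0) {		/* -ENOSYS: reaped by teardown */
			dev_kfree_skb_any(skb);
			return;
		}
		/* normal completion: len bytes of received data */
	}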
@@ -868,7 +876,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
868 struct cpdma_desc_pool *pool = ctlr->pool; 876 struct cpdma_desc_pool *pool = ctlr->pool;
869 unsigned long flags; 877 unsigned long flags;
870 int ret; 878 int ret;
871 unsigned long timeout; 879 unsigned timeout;
872 880
873 spin_lock_irqsave(&chan->lock, flags); 881 spin_lock_irqsave(&chan->lock, flags);
874 if (chan->state != CPDMA_STATE_ACTIVE) { 882 if (chan->state != CPDMA_STATE_ACTIVE) {
@@ -883,14 +891,15 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
883 dma_reg_write(ctlr, chan->td, chan_linear(chan)); 891 dma_reg_write(ctlr, chan->td, chan_linear(chan));
884 892
885 /* wait for teardown complete */ 893 /* wait for teardown complete */
886 timeout = jiffies + HZ/10; /* 100 msec */ 894 timeout = 100 * 100; /* 100 ms */
887 while (time_before(jiffies, timeout)) { 895 while (timeout) {
888 u32 cp = chan_read(chan, cp); 896 u32 cp = chan_read(chan, cp);
889 if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE) 897 if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
890 break; 898 break;
891 cpu_relax(); 899 udelay(10);
900 timeout--;
892 } 901 }
893 WARN_ON(!time_before(jiffies, timeout)); 902 WARN_ON(!timeout);
894 chan_write(chan, cp, CPDMA_TEARDOWN_VALUE); 903 chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
895 904
896 /* handle completed packets */ 905 /* handle completed packets */
@@ -1031,3 +1040,5 @@ unlock_ret:
1031 return ret; 1040 return ret;
1032} 1041}
1033EXPORT_SYMBOL_GPL(cpdma_control_set); 1042EXPORT_SYMBOL_GPL(cpdma_control_set);
1043
1044MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index d9bcc6032fdc..86dee487f2f0 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -89,7 +89,7 @@ int cpdma_chan_dump(struct cpdma_chan *chan);
89int cpdma_chan_get_stats(struct cpdma_chan *chan, 89int cpdma_chan_get_stats(struct cpdma_chan *chan,
90 struct cpdma_chan_stats *stats); 90 struct cpdma_chan_stats *stats);
91int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, 91int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
92 int len, int directed, gfp_t gfp_mask); 92 int len, int directed);
93int cpdma_chan_process(struct cpdma_chan *chan, int quota); 93int cpdma_chan_process(struct cpdma_chan *chan, int quota);
94 94
95int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable); 95int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
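Dropping the gfp_t parameter is an honest-API change rather than a behaviour change: cpdma descriptors come from a preallocated DMA pool, so the submit path never hits the page allocator, while the extra argument invited callers (as in cpsw and davinci_emac above) to pass GFP_KERNEL from softirq context. The call shape after the change:

	ret = cpdma_chan_submit(chan, skb, skb->data, skb_tailroom(skb), 0);
	if (ret < 0)
		dev_kfree_skb_any(skb);	/* pool exhausted or channel stopped */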
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 6a0b47715a84..860e15ddfbcb 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1037,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status)
1037 1037
1038recycle: 1038recycle:
1039 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, 1039 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
1040 skb_tailroom(skb), 0, GFP_KERNEL); 1040 skb_tailroom(skb), 0);
1041 1041
1042 WARN_ON(ret == -ENOMEM); 1042 WARN_ON(ret == -ENOMEM);
1043 if (unlikely(ret < 0)) 1043 if (unlikely(ret < 0))
@@ -1092,7 +1092,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
1092 skb_tx_timestamp(skb); 1092 skb_tx_timestamp(skb);
1093 1093
1094 ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len, 1094 ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
1095 0, GFP_KERNEL); 1095 0);
1096 if (unlikely(ret_code != 0)) { 1096 if (unlikely(ret_code != 0)) {
1097 if (netif_msg_tx_err(priv) && net_ratelimit()) 1097 if (netif_msg_tx_err(priv) && net_ratelimit())
1098 dev_err(emac_dev, "DaVinci EMAC: desc submit failed"); 1098 dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
@@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev)
1558 break; 1558 break;
1559 1559
1560 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, 1560 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
1561 skb_tailroom(skb), 0, GFP_KERNEL); 1561 skb_tailroom(skb), 0);
1562 if (WARN_ON(ret < 0)) 1562 if (WARN_ON(ret < 0))
1563 break; 1563 break;
1564 } 1564 }
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index d04a622b08d4..12aec173564c 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -485,6 +485,7 @@ static const struct of_device_id davinci_mdio_of_mtable[] = {
485 { .compatible = "ti,davinci_mdio", }, 485 { .compatible = "ti,davinci_mdio", },
486 { /* sentinel */ }, 486 { /* sentinel */ },
487}; 487};
488MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
488 489
489static struct platform_driver davinci_mdio_driver = { 490static struct platform_driver davinci_mdio_driver = {
490 .driver = { 491 .driver = {
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 50fcd018d14b..11596b2c4702 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -88,9 +88,6 @@
88 88
89#define BNX2FC_MAX_NPIV 256 89#define BNX2FC_MAX_NPIV 256
90 90
91#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048
92#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS
93#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE
94#define BNX2FC_MIN_PAYLOAD 256 91#define BNX2FC_MIN_PAYLOAD 256
95#define BNX2FC_MAX_PAYLOAD 2048 92#define BNX2FC_MAX_PAYLOAD 2048
96#define BNX2FC_MFS \ 93#define BNX2FC_MFS \
@@ -108,11 +105,8 @@
108#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) 105#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
109#define BNX2FC_5771X_DB_PAGE_SIZE 128 106#define BNX2FC_5771X_DB_PAGE_SIZE 128
110 107
111#define BNX2FC_MAX_TASKS \
112 (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS)
113#define BNX2FC_TASK_SIZE 128 108#define BNX2FC_TASK_SIZE 128
114#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) 109#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
115#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
116 110
117#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8 111#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8
118#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024) 112#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024)
@@ -125,12 +119,9 @@
125#define BNX2FC_WRITE (1 << 0) 119#define BNX2FC_WRITE (1 << 0)
126 120
127#define BNX2FC_MIN_XID 0 121#define BNX2FC_MIN_XID 0
128#define BNX2FC_MAX_XID \
129 (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
130#define FCOE_MAX_NUM_XIDS 0x2000 122#define FCOE_MAX_NUM_XIDS 0x2000
131#define FCOE_MIN_XID (BNX2FC_MAX_XID + 1) 123#define FCOE_MAX_XID_OFFSET (FCOE_MAX_NUM_XIDS - 1)
132#define FCOE_MAX_XID (FCOE_MIN_XID + FCOE_MAX_NUM_XIDS - 1) 124#define FCOE_XIDS_PER_CPU_OFFSET ((512 * nr_cpu_ids) - 1)
133#define FCOE_XIDS_PER_CPU (FCOE_MIN_XID + (512 * nr_cpu_ids) - 1)
134#define BNX2FC_MAX_LUN 0xFFFF 125#define BNX2FC_MAX_LUN 0xFFFF
135#define BNX2FC_MAX_FCP_TGT 256 126#define BNX2FC_MAX_FCP_TGT 256
136#define BNX2FC_MAX_CMD_LEN 16 127#define BNX2FC_MAX_CMD_LEN 16
@@ -206,6 +197,13 @@ struct bnx2fc_hba {
206 #define BNX2FC_FLAG_FW_INIT_DONE 0 197 #define BNX2FC_FLAG_FW_INIT_DONE 0
207 #define BNX2FC_FLAG_DESTROY_CMPL 1 198 #define BNX2FC_FLAG_DESTROY_CMPL 1
208 u32 next_conn_id; 199 u32 next_conn_id;
200
201 /* xid resources */
202 u16 max_xid;
203 u32 max_tasks;
204 u32 max_outstanding_cmds;
205 u32 elstm_xids;
206
209 struct fcoe_task_ctx_entry **task_ctx; 207 struct fcoe_task_ctx_entry **task_ctx;
210 dma_addr_t *task_ctx_dma; 208 dma_addr_t *task_ctx_dma;
211 struct regpair *task_ctx_bd_tbl; 209 struct regpair *task_ctx_bd_tbl;
@@ -504,8 +502,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
504void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba); 502void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
505int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba); 503int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
506void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba); 504void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
507struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, 505struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba);
508 u16 min_xid, u16 max_xid);
509void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr); 506void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
510void bnx2fc_get_link_state(struct bnx2fc_hba *hba); 507void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
511char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items); 508char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 90bc7bd00966..7dffec1e5715 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -71,7 +71,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb);
71static void bnx2fc_start_disc(struct bnx2fc_interface *interface); 71static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
72static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); 72static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
73static int bnx2fc_lport_config(struct fc_lport *lport); 73static int bnx2fc_lport_config(struct fc_lport *lport);
74static int bnx2fc_em_config(struct fc_lport *lport); 74static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba);
75static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); 75static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
76static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); 76static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
77static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); 77static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
@@ -944,16 +944,17 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
944 return 0; 944 return 0;
945} 945}
946 946
947static int bnx2fc_em_config(struct fc_lport *lport) 947static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba)
948{ 948{
949 int max_xid; 949 int fcoe_min_xid, fcoe_max_xid;
950 950
951 fcoe_min_xid = hba->max_xid + 1;
951 if (nr_cpu_ids <= 2) 952 if (nr_cpu_ids <= 2)
952 max_xid = FCOE_XIDS_PER_CPU; 953 fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET;
953 else 954 else
954 max_xid = FCOE_MAX_XID; 955 fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET;
955 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID, 956 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, fcoe_min_xid,
956 max_xid, NULL)) { 957 fcoe_max_xid, NULL)) {
957 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); 958 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
958 return -ENOMEM; 959 return -ENOMEM;
959 } 960 }
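With the compile-time FCOE_MIN_XID/FCOE_MAX_XID gone, bnx2fc_em_config() derives the libfc exchange-manager window from the adapter itself: offloaded exchanges own XIDs 0 through hba->max_xid, and the software exchange manager takes the range directly above, sized at 512 XIDs per CPU on one- and two-CPU systems and the full 0x2000 window otherwise. Expanded, the two offset macros give:

	fcoe_min_xid = hba->max_xid + 1;
	if (nr_cpu_ids <= 2)
		fcoe_max_xid = hba->max_xid + (512 * nr_cpu_ids) - 1;
	else
		fcoe_max_xid = hba->max_xid + FCOE_MAX_NUM_XIDS - 1;	/* 0x2000 */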
@@ -1300,6 +1301,12 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1300 mutex_init(&hba->hba_mutex); 1301 mutex_init(&hba->hba_mutex);
1301 1302
1302 hba->cnic = cnic; 1303 hba->cnic = cnic;
1304
1305 hba->max_tasks = cnic->max_fcoe_exchanges;
1306 hba->elstm_xids = (hba->max_tasks / 2);
1307 hba->max_outstanding_cmds = hba->elstm_xids;
1308 hba->max_xid = (hba->max_tasks - 1);
1309
1303 rc = bnx2fc_bind_pcidev(hba); 1310 rc = bnx2fc_bind_pcidev(hba);
1304 if (rc) { 1311 if (rc) {
1305 printk(KERN_ERR PFX "create_adapter: bind error\n"); 1312 printk(KERN_ERR PFX "create_adapter: bind error\n");
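These four assignments replace the old fixed BNX2FC_MAX_OUTSTANDING_CMNDS/BNX2FC_ELSTM_XIDS constants with values scaled to what the cnic layer advertises. Taking 4096 as an illustrative value of cnic->max_fcoe_exchanges, the arithmetic works out to:

	hba->max_tasks            = 4096;	/* cnic->max_fcoe_exchanges */
	hba->elstm_xids           = 2048;	/* max_tasks / 2            */
	hba->max_outstanding_cmds = 2048;	/* == elstm_xids            */
	hba->max_xid              = 4095;	/* max_tasks - 1            */

so half the task budget backs SCSI commands and half backs ELS/TM exchanges, with the Scsi_Host can_queue set from max_outstanding_cmds further down.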
@@ -1318,8 +1325,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1318 1325
1319 hba->num_ofld_sess = 0; 1326 hba->num_ofld_sess = 0;
1320 1327
1321 hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID, 1328 hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba);
1322 BNX2FC_MAX_XID);
1323 if (!hba->cmd_mgr) { 1329 if (!hba->cmd_mgr) {
1324 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); 1330 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
1325 goto cmgr_err; 1331 goto cmgr_err;
@@ -1330,13 +1336,13 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1330 FCOE_IOS_PER_CONNECTION_SHIFT; 1336 FCOE_IOS_PER_CONNECTION_SHIFT;
1331 fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS << 1337 fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
1332 FCOE_LOGINS_PER_PORT_SHIFT; 1338 FCOE_LOGINS_PER_PORT_SHIFT;
1333 fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS << 1339 fcoe_cap->capability2 = hba->max_outstanding_cmds <<
1334 FCOE_NUMBER_OF_EXCHANGES_SHIFT; 1340 FCOE_NUMBER_OF_EXCHANGES_SHIFT;
1335 fcoe_cap->capability2 |= BNX2FC_MAX_NPIV << 1341 fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
1336 FCOE_NPIV_WWN_PER_PORT_SHIFT; 1342 FCOE_NPIV_WWN_PER_PORT_SHIFT;
1337 fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS << 1343 fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
1338 FCOE_TARGETS_SUPPORTED_SHIFT; 1344 FCOE_TARGETS_SUPPORTED_SHIFT;
1339 fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS << 1345 fcoe_cap->capability3 |= hba->max_outstanding_cmds <<
1340 FCOE_OUTSTANDING_COMMANDS_SHIFT; 1346 FCOE_OUTSTANDING_COMMANDS_SHIFT;
1341 fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL; 1347 fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
1342 1348
@@ -1416,7 +1422,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1416 struct Scsi_Host *shost; 1422 struct Scsi_Host *shost;
1417 struct fc_vport *vport = dev_to_vport(parent); 1423 struct fc_vport *vport = dev_to_vport(parent);
1418 struct bnx2fc_lport *blport; 1424 struct bnx2fc_lport *blport;
1419 struct bnx2fc_hba *hba; 1425 struct bnx2fc_hba *hba = interface->hba;
1420 int rc = 0; 1426 int rc = 0;
1421 1427
1422 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); 1428 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
@@ -1426,6 +1432,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1426 } 1432 }
1427 1433
1428 /* Allocate Scsi_Host structure */ 1434 /* Allocate Scsi_Host structure */
1435 bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds;
1429 if (!npiv) 1436 if (!npiv)
1430 lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port)); 1437 lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
1431 else 1438 else
@@ -1477,7 +1484,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1477 1484
1478 /* Allocate exchange manager */ 1485 /* Allocate exchange manager */
1479 if (!npiv) 1486 if (!npiv)
1480 rc = bnx2fc_em_config(lport); 1487 rc = bnx2fc_em_config(lport, hba);
1481 else { 1488 else {
1482 shost = vport_to_shost(vport); 1489 shost = vport_to_shost(vport);
1483 n_port = shost_priv(shost); 1490 n_port = shost_priv(shost);
@@ -1491,7 +1498,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1491 1498
1492 bnx2fc_interface_get(interface); 1499 bnx2fc_interface_get(interface);
1493 1500
1494 hba = interface->hba;
1495 spin_lock_bh(&hba->hba_lock); 1501 spin_lock_bh(&hba->hba_lock);
1496 blport->lport = lport; 1502 blport->lport = lport;
1497 list_add_tail(&blport->list, &hba->vports); 1503 list_add_tail(&blport->list, &hba->vports);
@@ -2706,7 +2712,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
2706 .change_queue_type = fc_change_queue_type, 2712 .change_queue_type = fc_change_queue_type,
2707 .this_id = -1, 2713 .this_id = -1,
2708 .cmd_per_lun = 3, 2714 .cmd_per_lun = 3,
2709 .can_queue = BNX2FC_CAN_QUEUE,
2710 .use_clustering = ENABLE_CLUSTERING, 2715 .use_clustering = ENABLE_CLUSTERING,
2711 .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, 2716 .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
2712 .max_sectors = 1024, 2717 .max_sectors = 1024,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 85ea98a80f40..50510ffe1bf5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -77,7 +77,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
77 fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << 77 fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
78 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 78 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
79 79
80 fcoe_init1.num_tasks = BNX2FC_MAX_TASKS; 80 fcoe_init1.num_tasks = hba->max_tasks;
81 fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; 81 fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
82 fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; 82 fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
83 fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; 83 fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
@@ -697,7 +697,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
697 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); 697 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
698 698
699 699
700 if (xid > BNX2FC_MAX_XID) { 700 if (xid > hba->max_xid) {
701 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", 701 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
702 xid); 702 xid);
703 goto ret_err_rqe; 703 goto ret_err_rqe;
@@ -815,7 +815,7 @@ ret_err_rqe:
815 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", 815 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
816 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); 816 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
817 817
818 if (xid > BNX2FC_MAX_XID) { 818 if (xid > hba->max_xid) {
819 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); 819 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
820 goto ret_warn_rqe; 820 goto ret_warn_rqe;
821 } 821 }
@@ -880,7 +880,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
880 880
881 spin_lock_bh(&tgt->tgt_lock); 881 spin_lock_bh(&tgt->tgt_lock);
882 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; 882 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
883 if (xid >= BNX2FC_MAX_TASKS) { 883 if (xid >= hba->max_tasks) {
884 printk(KERN_ERR PFX "ERROR:xid out of range\n"); 884 printk(KERN_ERR PFX "ERROR:xid out of range\n");
885 spin_unlock_bh(&tgt->tgt_lock); 885 spin_unlock_bh(&tgt->tgt_lock);
886 return; 886 return;
@@ -1842,6 +1842,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1842 int rc = 0; 1842 int rc = 0;
1843 struct regpair *task_ctx_bdt; 1843 struct regpair *task_ctx_bdt;
1844 dma_addr_t addr; 1844 dma_addr_t addr;
1845 int task_ctx_arr_sz;
1845 int i; 1846 int i;
1846 1847
1847 /* 1848 /*
@@ -1865,7 +1866,8 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1865 * Allocate task_ctx which is an array of pointers pointing to 1866 * Allocate task_ctx which is an array of pointers pointing to
1866 * a page containing 32 task contexts 1867 * a page containing 32 task contexts
1867 */ 1868 */
1868 hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)), 1869 task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1870 hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
1869 GFP_KERNEL); 1871 GFP_KERNEL);
1870 if (!hba->task_ctx) { 1872 if (!hba->task_ctx) {
1871 printk(KERN_ERR PFX "unable to allocate task context array\n"); 1873 printk(KERN_ERR PFX "unable to allocate task context array\n");
@@ -1876,7 +1878,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1876 /* 1878 /*
1877 * Allocate task_ctx_dma which is an array of dma addresses 1879 * Allocate task_ctx_dma which is an array of dma addresses
1878 */ 1880 */
1879 hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ * 1881 hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
1880 sizeof(dma_addr_t)), GFP_KERNEL); 1882 sizeof(dma_addr_t)), GFP_KERNEL);
1881 if (!hba->task_ctx_dma) { 1883 if (!hba->task_ctx_dma) {
1882 printk(KERN_ERR PFX "unable to alloc context mapping array\n"); 1884 printk(KERN_ERR PFX "unable to alloc context mapping array\n");
@@ -1885,7 +1887,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1885 } 1887 }
1886 1888
1887 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; 1889 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1888 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { 1890 for (i = 0; i < task_ctx_arr_sz; i++) {
1889 1891
1890 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, 1892 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1891 PAGE_SIZE, 1893 PAGE_SIZE,
@@ -1905,7 +1907,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1905 return 0; 1907 return 0;
1906 1908
1907out3: 1909out3:
1908 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { 1910 for (i = 0; i < task_ctx_arr_sz; i++) {
1909 if (hba->task_ctx[i]) { 1911 if (hba->task_ctx[i]) {
1910 1912
1911 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 1913 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
@@ -1929,6 +1931,7 @@ out:
1929 1931
1930void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) 1932void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1931{ 1933{
1934 int task_ctx_arr_sz;
1932 int i; 1935 int i;
1933 1936
1934 if (hba->task_ctx_bd_tbl) { 1937 if (hba->task_ctx_bd_tbl) {
@@ -1938,8 +1941,9 @@ void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1938 hba->task_ctx_bd_tbl = NULL; 1941 hba->task_ctx_bd_tbl = NULL;
1939 } 1942 }
1940 1943
1944 task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1941 if (hba->task_ctx) { 1945 if (hba->task_ctx) {
1942 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { 1946 for (i = 0; i < task_ctx_arr_sz; i++) {
1943 if (hba->task_ctx[i]) { 1947 if (hba->task_ctx[i]) {
1944 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 1948 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1945 hba->task_ctx[i], 1949 hba->task_ctx[i],
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 60798e829de6..723a9a8ba5ee 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -239,8 +239,7 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
239 sc_cmd->scsi_done(sc_cmd); 239 sc_cmd->scsi_done(sc_cmd);
240} 240}
241 241
242struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, 242struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
243 u16 min_xid, u16 max_xid)
244{ 243{
245 struct bnx2fc_cmd_mgr *cmgr; 244 struct bnx2fc_cmd_mgr *cmgr;
246 struct io_bdt *bdt_info; 245 struct io_bdt *bdt_info;
@@ -252,6 +251,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
252 int num_ios, num_pri_ios; 251 int num_ios, num_pri_ios;
253 size_t bd_tbl_sz; 252 size_t bd_tbl_sz;
254 int arr_sz = num_possible_cpus() + 1; 253 int arr_sz = num_possible_cpus() + 1;
254 u16 min_xid = BNX2FC_MIN_XID;
255 u16 max_xid = hba->max_xid;
255 256
256 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { 257 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
257 printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ 258 printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
@@ -298,7 +299,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
298 * of slow path requests. 299 * of slow path requests.
299 */ 300 */
300 xid = BNX2FC_MIN_XID; 301 xid = BNX2FC_MIN_XID;
301 num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS; 302 num_pri_ios = num_ios - hba->elstm_xids;
302 for (i = 0; i < num_ios; i++) { 303 for (i = 0; i < num_ios; i++) {
303 io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); 304 io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
304 305
@@ -367,7 +368,7 @@ void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
367 struct bnx2fc_hba *hba = cmgr->hba; 368 struct bnx2fc_hba *hba = cmgr->hba;
368 size_t bd_tbl_sz; 369 size_t bd_tbl_sz;
369 u16 min_xid = BNX2FC_MIN_XID; 370 u16 min_xid = BNX2FC_MIN_XID;
370 u16 max_xid = BNX2FC_MAX_XID; 371 u16 max_xid = hba->max_xid;
371 int num_ios; 372 int num_ios;
372 int i; 373 int i;
373 374
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 260695186256..adf6e0648f20 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -34,6 +34,7 @@
34#define MLX4_CMD_H 34#define MLX4_CMD_H
35 35
36#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
37#include <linux/if_link.h>
37 38
38enum { 39enum {
39 /* initialization and general commands */ 40 /* initialization and general commands */
@@ -232,6 +233,11 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
232void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); 233void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
233 234
234u32 mlx4_comm_get_version(void); 235u32 mlx4_comm_get_version(void);
236int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
237int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
238int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
239int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
240
235 241
236#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) 242#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
237 243
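The four prototypes added above are the PF-side entry points behind the standard ndo_set_vf_mac/vlan/spoofchk/get_vf_config callbacks (hence the new <linux/if_link.h> include for struct ifla_vf_info). A hypothetical wrapper, purely for illustration (the struct and field names are assumptions, not quoted from the driver):

	/* hypothetical glue; my_en_priv and its fields are invented here */
	static int my_ndo_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
	{
		struct my_en_priv *priv = netdev_priv(dev);
		u64 mac64 = 0;
		int i;

		for (i = 0; i < ETH_ALEN; i++)	/* pack the 6 bytes, MSB first */
			mac64 = (mac64 << 8) | mac[i];

		return mlx4_set_vf_mac(priv->mdev, priv->port, vf, mac64);
	}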
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 2fbc1464b53b..53acaf64189f 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -155,7 +155,9 @@ enum {
155 MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2, 155 MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2,
156 MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3, 156 MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3,
157 MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4, 157 MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4,
158 MLX4_DEV_CAP_FLAG2_TS = 1LL << 5 158 MLX4_DEV_CAP_FLAG2_TS = 1LL << 5,
159 MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6,
160 MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7
159}; 161};
160 162
161enum { 163enum {
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 18af08a73f0a..a08bd2b7fe3f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2192,7 +2192,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
2192 } 2192 }
2193 2193
2194 addr = nla_data(tb[NDA_LLADDR]); 2194 addr = nla_data(tb[NDA_LLADDR]);
2195 if (!is_valid_ether_addr(addr)) { 2195 if (is_zero_ether_addr(addr)) {
2196 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ether address\n"); 2196 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ether address\n");
2197 return -EINVAL; 2197 return -EINVAL;
2198 } 2198 }
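The relaxed check is deliberate: is_valid_ether_addr() rejects multicast and broadcast addresses, but FDB entries for those are legitimate things to delete; on the removal path only the all-zero address is actually malformed. For reference:

	/* is_zero_ether_addr(a):  true only for 00:00:00:00:00:00        */
	/* is_valid_ether_addr(a): non-zero AND unicast (I/G bit clear)   */
	if (is_zero_ether_addr(addr))	/* delete path: mcast/bcast allowed */
		return -EINVAL;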
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 987a4e5e07e2..c625e4dad4b0 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -444,7 +444,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
444 if (dev->header_ops) { 444 if (dev->header_ops) {
445 /* Need space for new headers */ 445 /* Need space for new headers */
446 if (skb_cow_head(skb, dev->needed_headroom - 446 if (skb_cow_head(skb, dev->needed_headroom -
447 (tunnel->hlen + sizeof(struct iphdr)))); 447 (tunnel->hlen + sizeof(struct iphdr))))
448 goto free_skb; 448 goto free_skb;
449 449
450 tnl_params = (const struct iphdr *)skb->data; 450 tnl_params = (const struct iphdr *)skb->data;
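The ip_gre change removes one stray semicolon: with it, the if controlled an empty statement and the goto free_skb below it ran unconditionally, so every transmit on a device with header_ops was dropped. The pitfall in miniature:

	if (cond());		/* the ';' is the whole if-body         */
		do_thing();	/* runs unconditionally, despite indent */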
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2f72598dd8fe..2fd6dbea327a 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -598,7 +598,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
598 err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr, 598 err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
599 ops->policy); 599 ops->policy);
600 if (err < 0) 600 if (err < 0)
601 return err; 601 goto out;
602 } 602 }
603 603
604 info.snd_seq = nlh->nlmsg_seq; 604 info.snd_seq = nlh->nlmsg_seq;
@@ -613,7 +613,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
613 if (family->pre_doit) { 613 if (family->pre_doit) {
614 err = family->pre_doit(ops, skb, &info); 614 err = family->pre_doit(ops, skb, &info);
615 if (err) 615 if (err)
616 return err; 616 goto out;
617 } 617 }
618 618
619 err = ops->doit(skb, &info); 619 err = ops->doit(skb, &info);
@@ -621,6 +621,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
621 if (family->post_doit) 621 if (family->post_doit)
622 family->post_doit(ops, skb, &info); 622 family->post_doit(ops, skb, &info);
623 623
624out:
624 if (family->parallel_ops) 625 if (family->parallel_ops)
625 kfree(attrbuf); 626 kfree(attrbuf);
626 627
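Both genetlink hunks convert early return err paths into goto out for the reason visible at the end of the function: families with parallel_ops get a per-message attribute buffer allocated with kmalloc(), and bailing out before the common exit leaked it. The resulting single-exit shape:

	if (family->pre_doit) {
		err = family->pre_doit(ops, skb, &info);
		if (err)
			goto out;	/* was: return err, leaking attrbuf */
	}

	err = ops->doit(skb, &info);

	if (family->post_doit)
		family->post_doit(ops, skb, &info);

out:
	if (family->parallel_ops)
		kfree(attrbuf);		/* the single exit frees the parse buffer */
	return err;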