Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_alb.c | 54
-rw-r--r--  drivers/net/bonding/bond_main.c | 134
-rw-r--r--  drivers/net/bonding/bond_options.c | 1
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 2
-rw-r--r--  drivers/net/bonding/bonding.h | 1
-rw-r--r--  drivers/net/can/c_can/c_can.c | 648
-rw-r--r--  drivers/net/can/c_can/c_can.h | 23
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 9
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 2
-rw-r--r--  drivers/net/can/dev.c | 2
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 14
-rw-r--r--  drivers/net/can/sja1000/sja1000_isa.c | 16
-rw-r--r--  drivers/net/can/slcan.c | 6
-rw-r--r--  drivers/net/ethernet/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/altera/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.c | 118
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.h | 3
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdmahw.h | 13
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 338
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.h | 3
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdmahw.h | 26
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 53
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 116
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 206
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.c | 20
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.h | 8
-rw-r--r--  drivers/net/ethernet/arc/emac.h | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 82
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 60
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 35
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 706
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 23
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 223
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 71
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 22
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 13
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 21
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 40
-rw-r--r--  drivers/net/ethernet/jme.c | 53
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 188
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 77
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 21
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 99
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 31
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h | 2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c | 22
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c | 11
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h | 42
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c | 13
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 31
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c | 14
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 12
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 19
-rw-r--r--  drivers/net/ethernet/sfc/enum.h | 23
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 22
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 55
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 13
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 14
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 25
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 17
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 10
-rw-r--r--  drivers/net/macvlan.c | 21
-rw-r--r--  drivers/net/macvtap.c | 9
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 72
-rw-r--r--  drivers/net/phy/micrel.c | 6
-rw-r--r--  drivers/net/phy/phy.c | 27
-rw-r--r--  drivers/net/phy/phy_device.c | 4
-rw-r--r--  drivers/net/slip/slip.c | 6
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 57
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 28
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/vxlan.c | 42
-rw-r--r--  drivers/net/wan/cosa.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ahb.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug_sta.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 14
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/chip.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/cw1200/debug.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 261
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.h | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 55
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sf.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 10
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 12
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 7
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_core.c | 2
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mgmt.c | 21
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 22
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/trx.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 6
-rw-r--r--  drivers/net/wireless/ti/wl18xx/event.h | 20
-rw-r--r--  drivers/net/wireless/ti/wlcore/event.c | 5
-rw-r--r--  drivers/net/xen-netback/common.h | 2
-rw-r--r--  drivers/net/xen-netback/interface.c | 30
-rw-r--r--  drivers/net/xen-netback/netback.c | 102
160 files changed, 3438 insertions(+), 1808 deletions(-)
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9f69e818b000..93580a47cc54 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
 }
 
 /* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+				      bool strict_match);
 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
 static void rlb_src_unlink(struct bonding *bond, u32 index);
 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
 	bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-	alb_send_learning_packets(bond->curr_active_slave, addr);
+	alb_send_learning_packets(bond->curr_active_slave, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 /*********************** tlb/rlb shared functions *********************/
 
 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
-			    u16 vid)
+			    __be16 vlan_proto, u16 vid)
 {
 	struct learning_pkt pkt;
 	struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
 	skb->dev = slave->dev;
 
 	if (vid) {
-		skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+		skb = vlan_put_tag(skb, vlan_proto, vid);
 		if (!skb) {
 			pr_err("%s: Error: failed to insert VLAN tag\n",
 			       slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
 	dev_queue_xmit(skb);
 }
 
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+				      bool strict_match)
 {
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 	struct net_device *upper;
 	struct list_head *iter;
 
 	/* send untagged */
-	alb_send_lp_vid(slave, mac_addr, 0);
+	alb_send_lp_vid(slave, mac_addr, 0, 0);
 
 	/* loop through vlans and send one packet for each */
 	rcu_read_lock();
 	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-		if (upper->priv_flags & IFF_802_1Q_VLAN)
-			alb_send_lp_vid(slave, mac_addr,
-					vlan_dev_vlan_id(upper));
+		if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+			if (strict_match &&
+			    ether_addr_equal_64bits(mac_addr,
+						    upper->dev_addr)) {
+				alb_send_lp_vid(slave, mac_addr,
+						vlan_dev_vlan_proto(upper),
+						vlan_dev_vlan_id(upper));
+			} else if (!strict_match) {
+				alb_send_lp_vid(slave, upper->dev_addr,
+						vlan_dev_vlan_proto(upper),
+						vlan_dev_vlan_id(upper));
+			}
+		}
 	}
 	rcu_read_unlock();
 }
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 
 	/* fasten the change in the switch */
 	if (SLAVE_IS_OK(slave1)) {
-		alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+		alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
 			 * has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 	}
 
 	if (SLAVE_IS_OK(slave2)) {
-		alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+		alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
 			 * has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
 
 	/* send learning packets */
 	if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+		bool strict_match;
+
 		/* change of curr_active_slave involves swapping of mac addresses.
 		 * in order to avoid this swapping from happening while
 		 * sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work)
 		 */
 		read_lock(&bond->curr_slave_lock);
 
-		bond_for_each_slave_rcu(bond, slave, iter)
-			alb_send_learning_packets(slave, slave->dev->dev_addr);
+		bond_for_each_slave_rcu(bond, slave, iter) {
+			/* If updating current_active, use all currently
+			 * user mac addreses (!strict_match). Otherwise, only
+			 * use mac of the slave device.
+			 */
+			strict_match = (slave != bond->curr_active_slave);
+			alb_send_learning_packets(slave, slave->dev->dev_addr,
+						  strict_match);
+		}
 
 		read_unlock(&bond->curr_slave_lock);
 
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	} else {
 		/* set the new_slave to the bond mac address */
 		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-		alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+		alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+					  false);
	}
 
 	write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 	alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
 	read_lock(&bond->lock);
-	alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+	alb_send_learning_packets(bond->curr_active_slave,
+				  bond_dev->dev_addr, false);
 	if (bond->alb_info.rlb_enabled) {
 		/* inform clients mac address has changed */
 		rlb_req_update_slave_clients(bond, bond->curr_active_slave);
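The bond_alb.c hunks above gate tagged learning packets on the new strict_match flag: a strict sender only advertises a MAC that the vlan device actually owns, while a non-strict sender falls back to the vlan device's own address. Below is a minimal user-space sketch of just that branch logic; the toy_vlan type and main() driver are illustrative assumptions, not kernel code.

/*
 * Sketch of the strict_match decision in alb_send_learning_packets().
 * Only the branch structure mirrors the patch; everything else here
 * is a stand-in for kernel infrastructure.
 */
#include <stdio.h>
#include <string.h>

struct toy_vlan {
	unsigned char dev_addr[6];	/* MAC of the vlan device */
	unsigned short vid;
};

static void send_learning(const struct toy_vlan *v,
			  const unsigned char mac[6], int strict_match)
{
	if (strict_match && !memcmp(mac, v->dev_addr, 6))
		printf("vid %u: send with requested mac\n", v->vid);
	else if (!strict_match)
		printf("vid %u: send with vlan dev mac\n", v->vid);
	else
		printf("vid %u: suppressed (strict, mac differs)\n", v->vid);
}

int main(void)
{
	struct toy_vlan v = { { 0x02, 0, 0, 0, 0, 1 }, 100 };
	unsigned char other[6] = { 0x02, 0, 0, 0, 0, 2 };

	send_learning(&v, v.dev_addr, 1);	/* strict, matching: sent */
	send_learning(&v, other, 1);		/* strict, suppressed */
	send_learning(&v, other, 0);		/* loose: uses vlan mac */
	return 0;
}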
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69aff72c8957..d3a67896d435 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2126 */ 2126 */
2127static void bond_arp_send(struct net_device *slave_dev, int arp_op, 2127static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2128 __be32 dest_ip, __be32 src_ip, 2128 __be32 dest_ip, __be32 src_ip,
2129 struct bond_vlan_tag *inner, 2129 struct bond_vlan_tag *tags)
2130 struct bond_vlan_tag *outer)
2131{ 2130{
2132 struct sk_buff *skb; 2131 struct sk_buff *skb;
2132 int i;
2133 2133
2134 pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n", 2134 pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
2135 arp_op, slave_dev->name, &dest_ip, &src_ip); 2135 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2141 net_err_ratelimited("ARP packet allocation failed\n"); 2141 net_err_ratelimited("ARP packet allocation failed\n");
2142 return; 2142 return;
2143 } 2143 }
2144 if (outer->vlan_id) {
2145 if (inner->vlan_id) {
2146 pr_debug("inner tag: proto %X vid %X\n",
2147 ntohs(inner->vlan_proto), inner->vlan_id);
2148 skb = __vlan_put_tag(skb, inner->vlan_proto,
2149 inner->vlan_id);
2150 if (!skb) {
2151 net_err_ratelimited("failed to insert inner VLAN tag\n");
2152 return;
2153 }
2154 }
2155 2144
2156 pr_debug("outer reg: proto %X vid %X\n", 2145 /* Go through all the tags backwards and add them to the packet */
2157 ntohs(outer->vlan_proto), outer->vlan_id); 2146 for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
2158 skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id); 2147 if (!tags[i].vlan_id)
2148 continue;
2149
2150 pr_debug("inner tag: proto %X vid %X\n",
2151 ntohs(tags[i].vlan_proto), tags[i].vlan_id);
2152 skb = __vlan_put_tag(skb, tags[i].vlan_proto,
2153 tags[i].vlan_id);
2154 if (!skb) {
2155 net_err_ratelimited("failed to insert inner VLAN tag\n");
2156 return;
2157 }
2158 }
2159 /* Set the outer tag */
2160 if (tags[0].vlan_id) {
2161 pr_debug("outer tag: proto %X vid %X\n",
2162 ntohs(tags[0].vlan_proto), tags[0].vlan_id);
2163 skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
2159 if (!skb) { 2164 if (!skb) {
2160 net_err_ratelimited("failed to insert outer VLAN tag\n"); 2165 net_err_ratelimited("failed to insert outer VLAN tag\n");
2161 return; 2166 return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2164 arp_xmit(skb); 2169 arp_xmit(skb);
2165} 2170}
2166 2171
2172/* Validate the device path between the @start_dev and the @end_dev.
2173 * The path is valid if the @end_dev is reachable through device
2174 * stacking.
2175 * When the path is validated, collect any vlan information in the
2176 * path.
2177 */
2178static bool bond_verify_device_path(struct net_device *start_dev,
2179 struct net_device *end_dev,
2180 struct bond_vlan_tag *tags)
2181{
2182 struct net_device *upper;
2183 struct list_head *iter;
2184 int idx;
2185
2186 if (start_dev == end_dev)
2187 return true;
2188
2189 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2190 if (bond_verify_device_path(upper, end_dev, tags)) {
2191 if (is_vlan_dev(upper)) {
2192 idx = vlan_get_encap_level(upper);
2193 if (idx >= BOND_MAX_VLAN_ENCAP)
2194 return false;
2195
2196 tags[idx].vlan_proto =
2197 vlan_dev_vlan_proto(upper);
2198 tags[idx].vlan_id = vlan_dev_vlan_id(upper);
2199 }
2200 return true;
2201 }
2202 }
2203
2204 return false;
2205}
2167 2206
2168static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2207static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2169{ 2208{
2170 struct net_device *upper, *vlan_upper;
2171 struct list_head *iter, *vlan_iter;
2172 struct rtable *rt; 2209 struct rtable *rt;
2173 struct bond_vlan_tag inner, outer; 2210 struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
2174 __be32 *targets = bond->params.arp_targets, addr; 2211 __be32 *targets = bond->params.arp_targets, addr;
2175 int i; 2212 int i;
2213 bool ret;
2176 2214
2177 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { 2215 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2178 pr_debug("basa: target %pI4\n", &targets[i]); 2216 pr_debug("basa: target %pI4\n", &targets[i]);
2179 inner.vlan_proto = 0; 2217 memset(tags, 0, sizeof(tags));
2180 inner.vlan_id = 0;
2181 outer.vlan_proto = 0;
2182 outer.vlan_id = 0;
2183 2218
2184 /* Find out through which dev should the packet go */ 2219 /* Find out through which dev should the packet go */
2185 rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 2220 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2192 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", 2227 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
2193 bond->dev->name, 2228 bond->dev->name,
2194 &targets[i]); 2229 &targets[i]);
2195 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer); 2230 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2231 0, tags);
2196 continue; 2232 continue;
2197 } 2233 }
2198 2234
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2201 goto found; 2237 goto found;
2202 2238
2203 rcu_read_lock(); 2239 rcu_read_lock();
2204 /* first we search only for vlan devices. for every vlan 2240 ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
2205 * found we verify its upper dev list, searching for the
2206 * rt->dst.dev. If found we save the tag of the vlan and
2207 * proceed to send the packet.
2208 */
2209 netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
2210 vlan_iter) {
2211 if (!is_vlan_dev(vlan_upper))
2212 continue;
2213
2214 if (vlan_upper == rt->dst.dev) {
2215 outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
2216 outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
2217 rcu_read_unlock();
2218 goto found;
2219 }
2220 netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
2221 iter) {
2222 if (upper == rt->dst.dev) {
2223 /* If the upper dev is a vlan dev too,
2224 * set the vlan tag to inner tag.
2225 */
2226 if (is_vlan_dev(upper)) {
2227 inner.vlan_proto = vlan_dev_vlan_proto(upper);
2228 inner.vlan_id = vlan_dev_vlan_id(upper);
2229 }
2230 outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
2231 outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
2232 rcu_read_unlock();
2233 goto found;
2234 }
2235 }
2236 }
2237
2238 /* if the device we're looking for is not on top of any of
2239 * our upper vlans, then just search for any dev that
2240 * matches, and in case it's a vlan - save the id
2241 */
2242 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
2243 if (upper == rt->dst.dev) {
2244 rcu_read_unlock();
2245 goto found;
2246 }
2247 }
2248 rcu_read_unlock(); 2241 rcu_read_unlock();
2249 2242
2243 if (ret)
2244 goto found;
2245
2250 /* Not our device - skip */ 2246 /* Not our device - skip */
2251 pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", 2247 pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2252 bond->dev->name, &targets[i], 2248 bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
2259 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); 2255 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2260 ip_rt_put(rt); 2256 ip_rt_put(rt);
2261 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2257 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2262 addr, &inner, &outer); 2258 addr, tags);
2263 } 2259 }
2264} 2260}
2265 2261
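The new bond_verify_device_path() above recurses up the device stack and fills the tags array while unwinding, so the outermost vlan always lands in tags[0]. Here is a self-contained sketch of that unwind-time collection; toy_dev, the fixed upper array, and main() are assumptions standing in for struct net_device and the RCU upper-device walk.

/*
 * User-space model of the recursive path walk: recurse toward end,
 * record each vlan's tag on the way back down, indexed by its
 * encapsulation level (0 = outermost).
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ENCAP 2

struct toy_tag { unsigned short vid; };

struct toy_dev {
	const char *name;
	bool is_vlan;
	int encap_level;
	unsigned short vid;
	struct toy_dev *upper[4];	/* NULL-terminated */
};

static bool verify_path(struct toy_dev *start, struct toy_dev *end,
			struct toy_tag *tags)
{
	int i;

	if (start == end)
		return true;

	for (i = 0; start->upper[i]; i++) {
		struct toy_dev *up = start->upper[i];

		if (verify_path(up, end, tags)) {
			/* collect the tag while unwinding */
			if (up->is_vlan) {
				if (up->encap_level >= MAX_ENCAP)
					return false;
				tags[up->encap_level].vid = up->vid;
			}
			return true;
		}
	}
	return false;
}

int main(void)
{
	/* bond0 -> vlan10 (outer) -> vlan20 (inner) */
	struct toy_dev inner = { "vlan20", true, 1, 20, { 0 } };
	struct toy_dev outer = { "vlan10", true, 0, 10, { &inner } };
	struct toy_dev bond  = { "bond0", false, 0, 0, { &outer } };
	struct toy_tag tags[MAX_ENCAP] = { { 0 } };

	if (verify_path(&bond, &inner, tags))
		printf("outer vid %u, inner vid %u\n",
		       tags[0].vid, tags[1].vid);
	return 0;
}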
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 724e30fa20b9..832070298446 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
 static const struct bond_opt_value bond_intmax_tbl[] = {
 	{ "off",     0,       BOND_VALFLAG_DEFAULT},
 	{ "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+	{ NULL,      -1,      0}
 };
 
 static const struct bond_opt_value bond_lacp_rate_tbl[] = {
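The one-line bond_options.c fix adds the terminating entry that table walkers expect; without it, a lookup loop runs off the end of bond_intmax_tbl. A standalone sketch of such a sentinel-terminated lookup follows; opt_value is a simplified stand-in for struct bond_opt_value, not the kernel definition.

#include <stdio.h>
#include <string.h>
#include <limits.h>

struct opt_value {
	const char *string;
	long long value;
	unsigned int flags;
};

static const struct opt_value intmax_tbl[] = {
	{ "off",    0,       0 },
	{ "maxval", INT_MAX, 0 },
	{ NULL,     -1,      0 }	/* sentinel: the loop below stops here */
};

static const struct opt_value *find_opt(const struct opt_value *tbl,
					const char *s)
{
	for (; tbl->string; tbl++)	/* relies on the NULL terminator */
		if (!strcmp(tbl->string, s))
			return tbl;
	return NULL;
}

int main(void)
{
	const struct opt_value *v = find_opt(intmax_tbl, "maxval");

	printf("%s -> %lld\n", v ? v->string : "?", v ? v->value : -1);
	return 0;
}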
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 0e8b268da0a0..5f6babcfc26e 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -534,7 +534,7 @@ static ssize_t bonding_show_min_links(struct device *d,
 {
 	struct bonding *bond = to_bond(d);
 
-	return sprintf(buf, "%d\n", bond->params.min_links);
+	return sprintf(buf, "%u\n", bond->params.min_links);
 }
 
 static ssize_t bonding_store_min_links(struct device *d,
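The bond_sysfs.c change is a printf-format fix: min_links is unsigned, and %d would misprint large values as negative. A trivial standalone illustration (the output shown is the typical two's-complement result; this is not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int min_links = 4294967295u;	/* UINT_MAX */

	printf("%d\n", min_links);	/* typically prints -1: wrong */
	printf("%u\n", min_links);	/* prints 4294967295 */
	return 0;
}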
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b8bdd0acc8f3..00bea320e3b5 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -36,6 +36,7 @@
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
+#define BOND_MAX_VLAN_ENCAP	2
 #define BOND_MAX_ARP_TARGETS	16
 
 #define BOND_DEFAULT_MIIMON	100
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a5c8dcfa8357..95e04e2002da 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -60,6 +60,8 @@
60#define CONTROL_IE BIT(1) 60#define CONTROL_IE BIT(1)
61#define CONTROL_INIT BIT(0) 61#define CONTROL_INIT BIT(0)
62 62
63#define CONTROL_IRQMSK (CONTROL_EIE | CONTROL_IE | CONTROL_SIE)
64
63/* test register */ 65/* test register */
64#define TEST_RX BIT(7) 66#define TEST_RX BIT(7)
65#define TEST_TX1 BIT(6) 67#define TEST_TX1 BIT(6)
@@ -108,11 +110,14 @@
108#define IF_COMM_CONTROL BIT(4) 110#define IF_COMM_CONTROL BIT(4)
109#define IF_COMM_CLR_INT_PND BIT(3) 111#define IF_COMM_CLR_INT_PND BIT(3)
110#define IF_COMM_TXRQST BIT(2) 112#define IF_COMM_TXRQST BIT(2)
113#define IF_COMM_CLR_NEWDAT IF_COMM_TXRQST
111#define IF_COMM_DATAA BIT(1) 114#define IF_COMM_DATAA BIT(1)
112#define IF_COMM_DATAB BIT(0) 115#define IF_COMM_DATAB BIT(0)
113#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \ 116
114 IF_COMM_CONTROL | IF_COMM_TXRQST | \ 117/* TX buffer setup */
115 IF_COMM_DATAA | IF_COMM_DATAB) 118#define IF_COMM_TX (IF_COMM_ARB | IF_COMM_CONTROL | \
119 IF_COMM_TXRQST | \
120 IF_COMM_DATAA | IF_COMM_DATAB)
116 121
117/* For the low buffers we clear the interrupt bit, but keep newdat */ 122/* For the low buffers we clear the interrupt bit, but keep newdat */
118#define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \ 123#define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \
@@ -120,12 +125,19 @@
120 IF_COMM_DATAA | IF_COMM_DATAB) 125 IF_COMM_DATAA | IF_COMM_DATAB)
121 126
122/* For the high buffers we clear the interrupt bit and newdat */ 127/* For the high buffers we clear the interrupt bit and newdat */
123#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_TXRQST) 128#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)
129
130
131/* Receive setup of message objects */
132#define IF_COMM_RCV_SETUP (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)
133
134/* Invalidation of message objects */
135#define IF_COMM_INVAL (IF_COMM_ARB | IF_COMM_CONTROL)
124 136
125/* IFx arbitration */ 137/* IFx arbitration */
126#define IF_ARB_MSGVAL BIT(15) 138#define IF_ARB_MSGVAL BIT(31)
127#define IF_ARB_MSGXTD BIT(14) 139#define IF_ARB_MSGXTD BIT(30)
128#define IF_ARB_TRANSMIT BIT(13) 140#define IF_ARB_TRANSMIT BIT(29)
129 141
130/* IFx message control */ 142/* IFx message control */
131#define IF_MCONT_NEWDAT BIT(15) 143#define IF_MCONT_NEWDAT BIT(15)
@@ -139,19 +151,17 @@
139#define IF_MCONT_EOB BIT(7) 151#define IF_MCONT_EOB BIT(7)
140#define IF_MCONT_DLC_MASK 0xf 152#define IF_MCONT_DLC_MASK 0xf
141 153
154#define IF_MCONT_RCV (IF_MCONT_RXIE | IF_MCONT_UMASK)
155#define IF_MCONT_RCV_EOB (IF_MCONT_RCV | IF_MCONT_EOB)
156
157#define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB)
158
142/* 159/*
143 * Use IF1 for RX and IF2 for TX 160 * Use IF1 for RX and IF2 for TX
144 */ 161 */
145#define IF_RX 0 162#define IF_RX 0
146#define IF_TX 1 163#define IF_TX 1
147 164
148/* status interrupt */
149#define STATUS_INTERRUPT 0x8000
150
151/* global interrupt masks */
152#define ENABLE_ALL_INTERRUPTS 1
153#define DISABLE_ALL_INTERRUPTS 0
154
155/* minimum timeout for checking BUSY status */ 165/* minimum timeout for checking BUSY status */
156#define MIN_TIMEOUT_VALUE 6 166#define MIN_TIMEOUT_VALUE 6
157 167
@@ -171,6 +181,7 @@ enum c_can_lec_type {
171 LEC_BIT0_ERROR, 181 LEC_BIT0_ERROR,
172 LEC_CRC_ERROR, 182 LEC_CRC_ERROR,
173 LEC_UNUSED, 183 LEC_UNUSED,
184 LEC_MASK = LEC_UNUSED,
174}; 185};
175 186
176/* 187/*
@@ -226,143 +237,115 @@ static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
226 priv->raminit(priv, enable); 237 priv->raminit(priv, enable);
227} 238}
228 239
229static inline int get_tx_next_msg_obj(const struct c_can_priv *priv) 240static void c_can_irq_control(struct c_can_priv *priv, bool enable)
230{
231 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
232 C_CAN_MSG_OBJ_TX_FIRST;
233}
234
235static inline int get_tx_echo_msg_obj(int txecho)
236{
237 return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
238}
239
240static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
241{
242 u32 val = priv->read_reg(priv, index);
243 val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
244 return val;
245}
246
247static void c_can_enable_all_interrupts(struct c_can_priv *priv,
248 int enable)
249{ 241{
250 unsigned int cntrl_save = priv->read_reg(priv, 242 u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;
251 C_CAN_CTRL_REG);
252 243
253 if (enable) 244 if (enable)
254 cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE); 245 ctrl |= CONTROL_IRQMSK;
255 else
256 cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
257 246
258 priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save); 247 priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
259} 248}
260 249
261static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface) 250static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj)
262{ 251{
263 int count = MIN_TIMEOUT_VALUE; 252 struct c_can_priv *priv = netdev_priv(dev);
253 int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
264 254
265 while (count && priv->read_reg(priv, 255 priv->write_reg(priv, reg + 1, cmd);
266 C_CAN_IFACE(COMREQ_REG, iface)) & 256 priv->write_reg(priv, reg, obj);
267 IF_COMR_BUSY) { 257
268 count--; 258 for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
259 if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
260 return;
269 udelay(1); 261 udelay(1);
270 } 262 }
263 netdev_err(dev, "Updating object timed out\n");
271 264
272 if (!count) 265}
273 return 1;
274 266
275 return 0; 267static inline void c_can_object_get(struct net_device *dev, int iface,
268 u32 obj, u32 cmd)
269{
270 c_can_obj_update(dev, iface, cmd, obj);
276} 271}
277 272
278static inline void c_can_object_get(struct net_device *dev, 273static inline void c_can_object_put(struct net_device *dev, int iface,
279 int iface, int objno, int mask) 274 u32 obj, u32 cmd)
280{ 275{
281 struct c_can_priv *priv = netdev_priv(dev); 276 c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
277}
282 278
283 /* 279/*
284 * As per specs, after writting the message object number in the 280 * Note: According to documentation clearing TXIE while MSGVAL is set
285 * IF command request register the transfer b/w interface 281 * is not allowed, but works nicely on C/DCAN. And that lowers the I/O
286 * register and message RAM must be complete in 6 CAN-CLK 282 * load significantly.
287 * period. 283 */
288 */ 284static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj)
289 priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface), 285{
290 IFX_WRITE_LOW_16BIT(mask)); 286 struct c_can_priv *priv = netdev_priv(dev);
291 priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
292 IFX_WRITE_LOW_16BIT(objno));
293 287
294 if (c_can_msg_obj_is_busy(priv, iface)) 288 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
295 netdev_err(dev, "timed out in object get\n"); 289 c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
296} 290}
297 291
298static inline void c_can_object_put(struct net_device *dev, 292static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
299 int iface, int objno, int mask)
300{ 293{
301 struct c_can_priv *priv = netdev_priv(dev); 294 struct c_can_priv *priv = netdev_priv(dev);
302 295
303 /* 296 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
304 * As per specs, after writting the message object number in the 297 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
305 * IF command request register the transfer b/w interface 298 c_can_inval_tx_object(dev, iface, obj);
306 * register and message RAM must be complete in 6 CAN-CLK
307 * period.
308 */
309 priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
310 (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
311 priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
312 IFX_WRITE_LOW_16BIT(objno));
313
314 if (c_can_msg_obj_is_busy(priv, iface))
315 netdev_err(dev, "timed out in object put\n");
316} 299}
317 300
318static void c_can_write_msg_object(struct net_device *dev, 301static void c_can_setup_tx_object(struct net_device *dev, int iface,
319 int iface, struct can_frame *frame, int objno) 302 struct can_frame *frame, int idx)
320{ 303{
321 int i;
322 u16 flags = 0;
323 unsigned int id;
324 struct c_can_priv *priv = netdev_priv(dev); 304 struct c_can_priv *priv = netdev_priv(dev);
325 305 u16 ctrl = IF_MCONT_TX | frame->can_dlc;
326 if (!(frame->can_id & CAN_RTR_FLAG)) 306 bool rtr = frame->can_id & CAN_RTR_FLAG;
327 flags |= IF_ARB_TRANSMIT; 307 u32 arb = IF_ARB_MSGVAL;
308 int i;
328 309
329 if (frame->can_id & CAN_EFF_FLAG) { 310 if (frame->can_id & CAN_EFF_FLAG) {
330 id = frame->can_id & CAN_EFF_MASK; 311 arb |= frame->can_id & CAN_EFF_MASK;
331 flags |= IF_ARB_MSGXTD; 312 arb |= IF_ARB_MSGXTD;
332 } else 313 } else {
333 id = ((frame->can_id & CAN_SFF_MASK) << 18); 314 arb |= (frame->can_id & CAN_SFF_MASK) << 18;
315 }
316
317 if (!rtr)
318 arb |= IF_ARB_TRANSMIT;
319
320 /*
321 * If we change the DIR bit, we need to invalidate the buffer
322 * first, i.e. clear the MSGVAL flag in the arbiter.
323 */
324 if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
325 u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
326
327 c_can_inval_msg_object(dev, iface, obj);
328 change_bit(idx, &priv->tx_dir);
329 }
334 330
335 flags |= IF_ARB_MSGVAL; 331 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
332 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);
336 333
337 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 334 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
338 IFX_WRITE_LOW_16BIT(id));
339 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
340 IFX_WRITE_HIGH_16BIT(id));
341 335
342 for (i = 0; i < frame->can_dlc; i += 2) { 336 for (i = 0; i < frame->can_dlc; i += 2) {
343 priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2, 337 priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
344 frame->data[i] | (frame->data[i + 1] << 8)); 338 frame->data[i] | (frame->data[i + 1] << 8));
345 } 339 }
346
347 /* enable interrupt for this message object */
348 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
349 IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
350 frame->can_dlc);
351 c_can_object_put(dev, iface, objno, IF_COMM_ALL);
352} 340}
353 341
354static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev, 342static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
355 int iface, 343 int iface)
356 int ctrl_mask)
357{ 344{
358 int i; 345 int i;
359 struct c_can_priv *priv = netdev_priv(dev);
360 346
361 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) { 347 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
362 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 348 c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
363 ctrl_mask & ~IF_MCONT_NEWDAT);
364 c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
365 }
366} 349}
367 350
368static int c_can_handle_lost_msg_obj(struct net_device *dev, 351static int c_can_handle_lost_msg_obj(struct net_device *dev,
@@ -377,6 +360,9 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev,
377 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); 360 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
378 c_can_object_put(dev, iface, objno, IF_COMM_CONTROL); 361 c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
379 362
363 stats->rx_errors++;
364 stats->rx_over_errors++;
365
380 /* create an error msg */ 366 /* create an error msg */
381 skb = alloc_can_err_skb(dev, &frame); 367 skb = alloc_can_err_skb(dev, &frame);
382 if (unlikely(!skb)) 368 if (unlikely(!skb))
@@ -384,22 +370,18 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev,
384 370
385 frame->can_id |= CAN_ERR_CRTL; 371 frame->can_id |= CAN_ERR_CRTL;
386 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 372 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
387 stats->rx_errors++;
388 stats->rx_over_errors++;
389 373
390 netif_receive_skb(skb); 374 netif_receive_skb(skb);
391 return 1; 375 return 1;
392} 376}
393 377
394static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl) 378static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
395{ 379{
396 u16 flags, data;
397 int i;
398 unsigned int val;
399 struct c_can_priv *priv = netdev_priv(dev);
400 struct net_device_stats *stats = &dev->stats; 380 struct net_device_stats *stats = &dev->stats;
401 struct sk_buff *skb; 381 struct c_can_priv *priv = netdev_priv(dev);
402 struct can_frame *frame; 382 struct can_frame *frame;
383 struct sk_buff *skb;
384 u32 arb, data;
403 385
404 skb = alloc_can_skb(dev, &frame); 386 skb = alloc_can_skb(dev, &frame);
405 if (!skb) { 387 if (!skb) {
@@ -409,115 +391,82 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
409 391
410 frame->can_dlc = get_can_dlc(ctrl & 0x0F); 392 frame->can_dlc = get_can_dlc(ctrl & 0x0F);
411 393
412 flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)); 394 arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
413 val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) | 395 arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
414 (flags << 16);
415 396
416 if (flags & IF_ARB_MSGXTD) 397 if (arb & IF_ARB_MSGXTD)
417 frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG; 398 frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
418 else 399 else
419 frame->can_id = (val >> 18) & CAN_SFF_MASK; 400 frame->can_id = (arb >> 18) & CAN_SFF_MASK;
420 401
421 if (flags & IF_ARB_TRANSMIT) 402 if (arb & IF_ARB_TRANSMIT) {
422 frame->can_id |= CAN_RTR_FLAG; 403 frame->can_id |= CAN_RTR_FLAG;
423 else { 404 } else {
424 for (i = 0; i < frame->can_dlc; i += 2) { 405 int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
425 data = priv->read_reg(priv, 406
426 C_CAN_IFACE(DATA1_REG, iface) + i / 2); 407 for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
408 data = priv->read_reg(priv, dreg);
427 frame->data[i] = data; 409 frame->data[i] = data;
428 frame->data[i + 1] = data >> 8; 410 frame->data[i + 1] = data >> 8;
429 } 411 }
430 } 412 }
431 413
432 netif_receive_skb(skb);
433
434 stats->rx_packets++; 414 stats->rx_packets++;
435 stats->rx_bytes += frame->can_dlc; 415 stats->rx_bytes += frame->can_dlc;
416
417 netif_receive_skb(skb);
436 return 0; 418 return 0;
437} 419}
438 420
439static void c_can_setup_receive_object(struct net_device *dev, int iface, 421static void c_can_setup_receive_object(struct net_device *dev, int iface,
440 int objno, unsigned int mask, 422 u32 obj, u32 mask, u32 id, u32 mcont)
441 unsigned int id, unsigned int mcont)
442{ 423{
443 struct c_can_priv *priv = netdev_priv(dev); 424 struct c_can_priv *priv = netdev_priv(dev);
444 425
445 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), 426 mask |= BIT(29);
446 IFX_WRITE_LOW_16BIT(mask)); 427 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
447 428 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);
448 /* According to C_CAN documentation, the reserved bit
449 * in IFx_MASK2 register is fixed 1
450 */
451 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
452 IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
453 429
454 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 430 id |= IF_ARB_MSGVAL;
455 IFX_WRITE_LOW_16BIT(id)); 431 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id);
456 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 432 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);
457 (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
458 433
459 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont); 434 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
460 c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST); 435 c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
461
462 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
463 c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
464}
465
466static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
467{
468 struct c_can_priv *priv = netdev_priv(dev);
469
470 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
471 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
472 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
473
474 c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
475
476 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
477 c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
478}
479
480static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
481{
482 int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
483
484 /*
485 * as transmission request register's bit n-1 corresponds to
486 * message object n, we need to handle the same properly.
487 */
488 if (val & (1 << (objno - 1)))
489 return 1;
490
491 return 0;
492} 436}
493 437
494static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, 438static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
495 struct net_device *dev) 439 struct net_device *dev)
496{ 440{
497 u32 msg_obj_no;
498 struct c_can_priv *priv = netdev_priv(dev);
499 struct can_frame *frame = (struct can_frame *)skb->data; 441 struct can_frame *frame = (struct can_frame *)skb->data;
442 struct c_can_priv *priv = netdev_priv(dev);
443 u32 idx, obj;
500 444
501 if (can_dropped_invalid_skb(dev, skb)) 445 if (can_dropped_invalid_skb(dev, skb))
502 return NETDEV_TX_OK; 446 return NETDEV_TX_OK;
503
504 spin_lock_bh(&priv->xmit_lock);
505 msg_obj_no = get_tx_next_msg_obj(priv);
506
507 /* prepare message object for transmission */
508 c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
509 priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
510 can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
511
512 /* 447 /*
513 * we have to stop the queue in case of a wrap around or 448 * This is not a FIFO. C/D_CAN sends out the buffers
514 * if the next TX message object is still in use 449 * prioritized. The lowest buffer number wins.
515 */ 450 */
516 priv->tx_next++; 451 idx = fls(atomic_read(&priv->tx_active));
517 if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) || 452 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
518 (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0) 453
454 /* If this is the last buffer, stop the xmit queue */
455 if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
519 netif_stop_queue(dev); 456 netif_stop_queue(dev);
520 spin_unlock_bh(&priv->xmit_lock); 457 /*
458 * Store the message in the interface so we can call
459 * can_put_echo_skb(). We must do this before we enable
460 * transmit as we might race against do_tx().
461 */
462 c_can_setup_tx_object(dev, IF_TX, frame, idx);
463 priv->dlc[idx] = frame->can_dlc;
464 can_put_echo_skb(skb, dev, idx);
465
466 /* Update the active bits */
467 atomic_add((1 << idx), &priv->tx_active);
468 /* Start transmission */
469 c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
521 470
522 return NETDEV_TX_OK; 471 return NETDEV_TX_OK;
523} 472}
@@ -594,11 +543,10 @@ static void c_can_configure_msg_objects(struct net_device *dev)
594 543
595 /* setup receive message objects */ 544 /* setup receive message objects */
596 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++) 545 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
597 c_can_setup_receive_object(dev, IF_RX, i, 0, 0, 546 c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);
598 (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
599 547
600 c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0, 548 c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
601 IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK); 549 IF_MCONT_RCV_EOB);
602} 550}
603 551
604/* 552/*
@@ -612,30 +560,22 @@ static int c_can_chip_config(struct net_device *dev)
612 struct c_can_priv *priv = netdev_priv(dev); 560 struct c_can_priv *priv = netdev_priv(dev);
613 561
614 /* enable automatic retransmission */ 562 /* enable automatic retransmission */
615 priv->write_reg(priv, C_CAN_CTRL_REG, 563 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
616 CONTROL_ENABLE_AR);
617 564
618 if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) && 565 if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
619 (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) { 566 (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
620 /* loopback + silent mode : useful for hot self-test */ 567 /* loopback + silent mode : useful for hot self-test */
621 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | 568 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
622 CONTROL_SIE | CONTROL_IE | CONTROL_TEST); 569 priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
623 priv->write_reg(priv, C_CAN_TEST_REG,
624 TEST_LBACK | TEST_SILENT);
625 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { 570 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
626 /* loopback mode : useful for self-test function */ 571 /* loopback mode : useful for self-test function */
627 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | 572 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
628 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
629 priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK); 573 priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
630 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { 574 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
631 /* silent mode : bus-monitoring mode */ 575 /* silent mode : bus-monitoring mode */
632 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | 576 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
633 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
634 priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT); 577 priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
635 } else 578 }
636 /* normal mode*/
637 priv->write_reg(priv, C_CAN_CTRL_REG,
638 CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
639 579
640 /* configure message objects */ 580 /* configure message objects */
641 c_can_configure_msg_objects(dev); 581 c_can_configure_msg_objects(dev);
@@ -643,6 +583,11 @@ static int c_can_chip_config(struct net_device *dev)
643 /* set a `lec` value so that we can check for updates later */ 583 /* set a `lec` value so that we can check for updates later */
644 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); 584 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
645 585
586 /* Clear all internal status */
587 atomic_set(&priv->tx_active, 0);
588 priv->rxmasked = 0;
589 priv->tx_dir = 0;
590
646 /* set bittiming params */ 591 /* set bittiming params */
647 return c_can_set_bittiming(dev); 592 return c_can_set_bittiming(dev);
648} 593}
@@ -657,13 +602,11 @@ static int c_can_start(struct net_device *dev)
657 if (err) 602 if (err)
658 return err; 603 return err;
659 604
660 priv->can.state = CAN_STATE_ERROR_ACTIVE; 605 /* Setup the command for new messages */
661 606 priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
662 /* reset tx helper pointers */ 607 IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
663 priv->tx_next = priv->tx_echo = 0;
664 608
665 /* enable status change, error and module interrupts */ 609 priv->can.state = CAN_STATE_ERROR_ACTIVE;
666 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
667 610
668 return 0; 611 return 0;
669} 612}
@@ -672,15 +615,13 @@ static void c_can_stop(struct net_device *dev)
672{ 615{
673 struct c_can_priv *priv = netdev_priv(dev); 616 struct c_can_priv *priv = netdev_priv(dev);
674 617
675 /* disable all interrupts */ 618 c_can_irq_control(priv, false);
676 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
677
678 /* set the state as STOPPED */
679 priv->can.state = CAN_STATE_STOPPED; 619 priv->can.state = CAN_STATE_STOPPED;
680} 620}
681 621
682static int c_can_set_mode(struct net_device *dev, enum can_mode mode) 622static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
683{ 623{
624 struct c_can_priv *priv = netdev_priv(dev);
684 int err; 625 int err;
685 626
686 switch (mode) { 627 switch (mode) {
@@ -689,6 +630,7 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
689 if (err) 630 if (err)
690 return err; 631 return err;
691 netif_wake_queue(dev); 632 netif_wake_queue(dev);
633 c_can_irq_control(priv, true);
692 break; 634 break;
693 default: 635 default:
694 return -EOPNOTSUPP; 636 return -EOPNOTSUPP;
@@ -724,42 +666,29 @@ static int c_can_get_berr_counter(const struct net_device *dev,
724 return err; 666 return err;
725} 667}
726 668
727/*
728 * priv->tx_echo holds the number of the oldest can_frame put for
729 * transmission into the hardware, but not yet ACKed by the CAN tx
730 * complete IRQ.
731 *
732 * We iterate from priv->tx_echo to priv->tx_next and check if the
733 * packet has been transmitted, echo it back to the CAN framework.
734 * If we discover a not yet transmitted packet, stop looking for more.
735 */
736static void c_can_do_tx(struct net_device *dev) 669static void c_can_do_tx(struct net_device *dev)
737{ 670{
738 struct c_can_priv *priv = netdev_priv(dev); 671 struct c_can_priv *priv = netdev_priv(dev);
739 struct net_device_stats *stats = &dev->stats; 672 struct net_device_stats *stats = &dev->stats;
740 u32 val, obj, pkts = 0, bytes = 0; 673 u32 idx, obj, pkts = 0, bytes = 0, pend, clr;
741
742 spin_lock_bh(&priv->xmit_lock);
743
744 for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
745 obj = get_tx_echo_msg_obj(priv->tx_echo);
746 val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
747 674
748 if (val & (1 << (obj - 1))) 675 clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
749 break;
750 676
751 can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST); 677 while ((idx = ffs(pend))) {
752 bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST]; 678 idx--;
679 pend &= ~(1 << idx);
680 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
681 c_can_inval_tx_object(dev, IF_RX, obj);
682 can_get_echo_skb(dev, idx);
683 bytes += priv->dlc[idx];
753 pkts++; 684 pkts++;
754 c_can_inval_msg_object(dev, IF_TX, obj);
755 } 685 }
756 686
757 /* restart queue if wrap-up or if queue stalled on last pkt */ 687 /* Clear the bits in the tx_active mask */
758 if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) || 688 atomic_sub(clr, &priv->tx_active);
759 ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
760 netif_wake_queue(dev);
761 689
762 spin_unlock_bh(&priv->xmit_lock); 690 if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
691 netif_wake_queue(dev);
763 692
764 if (pkts) { 693 if (pkts) {
765 stats->tx_bytes += bytes; 694 stats->tx_bytes += bytes;
@@ -800,18 +729,28 @@ static u32 c_can_adjust_pending(u32 pend)
800 return pend & ~((1 << lasts) - 1); 729 return pend & ~((1 << lasts) - 1);
801} 730}
802 731
732static inline void c_can_rx_object_get(struct net_device *dev,
733 struct c_can_priv *priv, u32 obj)
734{
735 c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
736}
737
738static inline void c_can_rx_finalize(struct net_device *dev,
739 struct c_can_priv *priv, u32 obj)
740{
741 if (priv->type != BOSCH_D_CAN)
742 c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
743}
744
803static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, 745static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
804 u32 pend, int quota) 746 u32 pend, int quota)
805{ 747{
806 u32 pkts = 0, ctrl, obj, mcmd; 748 u32 pkts = 0, ctrl, obj;
807 749
808 while ((obj = ffs(pend)) && quota > 0) { 750 while ((obj = ffs(pend)) && quota > 0) {
809 pend &= ~BIT(obj - 1); 751 pend &= ~BIT(obj - 1);
810 752
811 mcmd = obj < C_CAN_MSG_RX_LOW_LAST ? 753 c_can_rx_object_get(dev, priv, obj);
812 IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
813
814 c_can_object_get(dev, IF_RX, obj, mcmd);
815 ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX)); 754 ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
816 755
817 if (ctrl & IF_MCONT_MSGLST) { 756 if (ctrl & IF_MCONT_MSGLST) {
@@ -833,9 +772,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
833 /* read the data from the message object */ 772 /* read the data from the message object */
834 c_can_read_msg_object(dev, IF_RX, ctrl); 773 c_can_read_msg_object(dev, IF_RX, ctrl);
835 774
836 if (obj == C_CAN_MSG_RX_LOW_LAST) 775 c_can_rx_finalize(dev, priv, obj);
837 /* activate all lower message objects */
838 c_can_activate_all_lower_rx_msg_obj(dev, IF_RX, ctrl);
839 776
840 pkts++; 777 pkts++;
841 quota--; 778 quota--;
@@ -844,6 +781,13 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
844 return pkts; 781 return pkts;
845} 782}
846 783
784static inline u32 c_can_get_pending(struct c_can_priv *priv)
785{
786 u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
787
788 return pend;
789}
790
847/* 791/*
848 * theory of operation: 792 * theory of operation:
849 * 793 *
@@ -853,18 +797,9 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
853 * has arrived. To work-around this issue, we keep two groups of message 797 * has arrived. To work-around this issue, we keep two groups of message
854 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. 798 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
855 * 799 *
856 * To ensure in-order frame reception we use the following 800 * We clear the newdat bit right away.
857 * approach while re-activating a message object to receive further 801 *
858 * frames: 802 * This can result in packet reordering when the readout is slow.
859 * - if the current message object number is lower than
860 * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
861 * the INTPND bit.
862 * - if the current message object number is equal to
863 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
864 * receive message objects.
865 * - if the current message object number is greater than
866 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
867 * only this message object.
868 */ 803 */
869static int c_can_do_rx_poll(struct net_device *dev, int quota) 804static int c_can_do_rx_poll(struct net_device *dev, int quota)
870{ 805{
@@ -880,7 +815,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
880 815
881 while (quota > 0) { 816 while (quota > 0) {
882 if (!pend) { 817 if (!pend) {
883 pend = priv->read_reg(priv, C_CAN_INTPND1_REG); 818 pend = c_can_get_pending(priv);
884 if (!pend) 819 if (!pend)
885 break; 820 break;
886 /* 821 /*
@@ -905,12 +840,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
905 return pkts; 840 return pkts;
906} 841}
907 842
908static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
909{
910 return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
911 (priv->current_status & LEC_UNUSED);
912}
913
914static int c_can_handle_state_change(struct net_device *dev, 843static int c_can_handle_state_change(struct net_device *dev,
915 enum c_can_bus_error_types error_type) 844 enum c_can_bus_error_types error_type)
916{ 845{
@@ -922,6 +851,26 @@ static int c_can_handle_state_change(struct net_device *dev,
922 struct sk_buff *skb; 851 struct sk_buff *skb;
923 struct can_berr_counter bec; 852 struct can_berr_counter bec;
924 853
854 switch (error_type) {
855 case C_CAN_ERROR_WARNING:
856 /* error warning state */
857 priv->can.can_stats.error_warning++;
858 priv->can.state = CAN_STATE_ERROR_WARNING;
859 break;
860 case C_CAN_ERROR_PASSIVE:
861 /* error passive state */
862 priv->can.can_stats.error_passive++;
863 priv->can.state = CAN_STATE_ERROR_PASSIVE;
864 break;
865 case C_CAN_BUS_OFF:
866 /* bus-off state */
867 priv->can.state = CAN_STATE_BUS_OFF;
868 can_bus_off(dev);
869 break;
870 default:
871 break;
872 }
873
925 /* propagate the error condition to the CAN stack */ 874 /* propagate the error condition to the CAN stack */
926 skb = alloc_can_err_skb(dev, &cf); 875 skb = alloc_can_err_skb(dev, &cf);
927 if (unlikely(!skb)) 876 if (unlikely(!skb))
@@ -935,8 +884,6 @@ static int c_can_handle_state_change(struct net_device *dev,
935 switch (error_type) { 884 switch (error_type) {
936 case C_CAN_ERROR_WARNING: 885 case C_CAN_ERROR_WARNING:
937 /* error warning state */ 886 /* error warning state */
938 priv->can.can_stats.error_warning++;
939 priv->can.state = CAN_STATE_ERROR_WARNING;
940 cf->can_id |= CAN_ERR_CRTL; 887 cf->can_id |= CAN_ERR_CRTL;
941 cf->data[1] = (bec.txerr > bec.rxerr) ? 888 cf->data[1] = (bec.txerr > bec.rxerr) ?
942 CAN_ERR_CRTL_TX_WARNING : 889 CAN_ERR_CRTL_TX_WARNING :
@@ -947,8 +894,6 @@ static int c_can_handle_state_change(struct net_device *dev,
947 break; 894 break;
948 case C_CAN_ERROR_PASSIVE: 895 case C_CAN_ERROR_PASSIVE:
949 /* error passive state */ 896 /* error passive state */
950 priv->can.can_stats.error_passive++;
951 priv->can.state = CAN_STATE_ERROR_PASSIVE;
952 cf->can_id |= CAN_ERR_CRTL; 897 cf->can_id |= CAN_ERR_CRTL;
953 if (rx_err_passive) 898 if (rx_err_passive)
954 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; 899 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
@@ -960,22 +905,16 @@ static int c_can_handle_state_change(struct net_device *dev,
960 break; 905 break;
961 case C_CAN_BUS_OFF: 906 case C_CAN_BUS_OFF:
962 /* bus-off state */ 907 /* bus-off state */
963 priv->can.state = CAN_STATE_BUS_OFF;
964 cf->can_id |= CAN_ERR_BUSOFF; 908 cf->can_id |= CAN_ERR_BUSOFF;
965 /*
966 * disable all interrupts in bus-off mode to ensure that
967 * the CPU is not hogged down
968 */
969 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
970 can_bus_off(dev); 909 can_bus_off(dev);
971 break; 910 break;
972 default: 911 default:
973 break; 912 break;
974 } 913 }
975 914
976 netif_receive_skb(skb);
977 stats->rx_packets++; 915 stats->rx_packets++;
978 stats->rx_bytes += cf->can_dlc; 916 stats->rx_bytes += cf->can_dlc;
917 netif_receive_skb(skb);
979 918
980 return 1; 919 return 1;
981} 920}
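Note the deliberate reordering at the end of this function: the stats are charged from cf->can_dlc before netif_receive_skb() is called, because ownership of the skb (and of cf, which points into it) passes to the stack there. A sketch of the safe shape:

    stats->rx_packets++;
    stats->rx_bytes += cf->can_dlc; /* cf is still ours here */
    netif_receive_skb(skb);         /* skb may be freed from here on */
    /* no further access to skb or cf */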
@@ -996,6 +935,13 @@ static int c_can_handle_bus_err(struct net_device *dev,
996 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR) 935 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
997 return 0; 936 return 0;
998 937
938 if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
939 return 0;
940
941 /* common for all type of bus errors */
942 priv->can.can_stats.bus_error++;
943 stats->rx_errors++;
944
999 /* propagate the error condition to the CAN stack */ 945 /* propagate the error condition to the CAN stack */
1000 skb = alloc_can_err_skb(dev, &cf); 946 skb = alloc_can_err_skb(dev, &cf);
1001 if (unlikely(!skb)) 947 if (unlikely(!skb))
@@ -1005,10 +951,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
1005 * check for 'last error code' which tells us the 951 * check for 'last error code' which tells us the
1006 * type of the last error to occur on the CAN bus 952 * type of the last error to occur on the CAN bus
1007 */ 953 */
1008
1009 /* common for all type of bus errors */
1010 priv->can.can_stats.bus_error++;
1011 stats->rx_errors++;
1012 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 954 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1013 cf->data[2] |= CAN_ERR_PROT_UNSPEC; 955 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
1014 956
@@ -1043,95 +985,64 @@ static int c_can_handle_bus_err(struct net_device *dev,
1043 break; 985 break;
1044 } 986 }
1045 987
1046 /* set a `lec` value so that we can check for updates later */
1047 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
1048
1049 netif_receive_skb(skb);
1050 stats->rx_packets++; 988 stats->rx_packets++;
1051 stats->rx_bytes += cf->can_dlc; 989 stats->rx_bytes += cf->can_dlc;
1052 990 netif_receive_skb(skb);
1053 return 1; 991 return 1;
1054} 992}
1055 993
1056static int c_can_poll(struct napi_struct *napi, int quota) 994static int c_can_poll(struct napi_struct *napi, int quota)
1057{ 995{
1058 u16 irqstatus;
1059 int lec_type = 0;
1060 int work_done = 0;
1061 struct net_device *dev = napi->dev; 996 struct net_device *dev = napi->dev;
1062 struct c_can_priv *priv = netdev_priv(dev); 997 struct c_can_priv *priv = netdev_priv(dev);
998 u16 curr, last = priv->last_status;
999 int work_done = 0;
1063 1000
1064 irqstatus = priv->irqstatus; 1001 priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
1065 if (!irqstatus) 1002 /* Ack status on C_CAN. D_CAN is self clearing */
1066 goto end; 1003 if (priv->type != BOSCH_D_CAN)
1004 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
1067 1005
1068 /* status events have the highest priority */ 1006 /* handle state changes */
1069 if (irqstatus == STATUS_INTERRUPT) { 1007 if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
1070 priv->current_status = priv->read_reg(priv, 1008 netdev_dbg(dev, "entered error warning state\n");
1071 C_CAN_STS_REG); 1009 work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
1072 1010 }
1073 /* handle Tx/Rx events */
1074 if (priv->current_status & STATUS_TXOK)
1075 priv->write_reg(priv, C_CAN_STS_REG,
1076 priv->current_status & ~STATUS_TXOK);
1077
1078 if (priv->current_status & STATUS_RXOK)
1079 priv->write_reg(priv, C_CAN_STS_REG,
1080 priv->current_status & ~STATUS_RXOK);
1081
1082 /* handle state changes */
1083 if ((priv->current_status & STATUS_EWARN) &&
1084 (!(priv->last_status & STATUS_EWARN))) {
1085 netdev_dbg(dev, "entered error warning state\n");
1086 work_done += c_can_handle_state_change(dev,
1087 C_CAN_ERROR_WARNING);
1088 }
1089 if ((priv->current_status & STATUS_EPASS) &&
1090 (!(priv->last_status & STATUS_EPASS))) {
1091 netdev_dbg(dev, "entered error passive state\n");
1092 work_done += c_can_handle_state_change(dev,
1093 C_CAN_ERROR_PASSIVE);
1094 }
1095 if ((priv->current_status & STATUS_BOFF) &&
1096 (!(priv->last_status & STATUS_BOFF))) {
1097 netdev_dbg(dev, "entered bus off state\n");
1098 work_done += c_can_handle_state_change(dev,
1099 C_CAN_BUS_OFF);
1100 }
1101 1011
1102 /* handle bus recovery events */ 1012 if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
1103 if ((!(priv->current_status & STATUS_BOFF)) && 1013 netdev_dbg(dev, "entered error passive state\n");
1104 (priv->last_status & STATUS_BOFF)) { 1014 work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
1105 netdev_dbg(dev, "left bus off state\n"); 1015 }
1106 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1107 }
1108 if ((!(priv->current_status & STATUS_EPASS)) &&
1109 (priv->last_status & STATUS_EPASS)) {
1110 netdev_dbg(dev, "left error passive state\n");
1111 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1112 }
1113 1016
1114 priv->last_status = priv->current_status; 1017 if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
1115 1018 netdev_dbg(dev, "entered bus off state\n");
1116 /* handle lec errors on the bus */ 1019 work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
1117 lec_type = c_can_has_and_handle_berr(priv); 1020 goto end;
1118 if (lec_type)
1119 work_done += c_can_handle_bus_err(dev, lec_type);
1120 } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
1121 (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
1122 /* handle events corresponding to receive message objects */
1123 work_done += c_can_do_rx_poll(dev, (quota - work_done));
1124 } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
1125 (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
1126 /* handle events corresponding to transmit message objects */
1127 c_can_do_tx(dev);
1128 } 1021 }
1129 1022
1023 /* handle bus recovery events */
1024 if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
1025 netdev_dbg(dev, "left bus off state\n");
1026 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1027 }
1028 if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
1029 netdev_dbg(dev, "left error passive state\n");
1030 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1031 }
1032
1033 /* handle lec errors on the bus */
1034 work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);
1035
1036 /* Handle Tx/Rx events. We do this unconditionally */
1037 work_done += c_can_do_rx_poll(dev, (quota - work_done));
1038 c_can_do_tx(dev);
1039
1130end: 1040end:
1131 if (work_done < quota) { 1041 if (work_done < quota) {
1132 napi_complete(napi); 1042 napi_complete(napi);
1133 /* enable all IRQs */ 1043 /* enable all IRQs if we are not in bus off state */
1134 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS); 1044 if (priv->can.state != CAN_STATE_BUS_OFF)
1045 c_can_irq_control(priv, true);
1135 } 1046 }
1136 1047
1137 return work_done; 1048 return work_done;
@@ -1142,12 +1053,11 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
1142 struct net_device *dev = (struct net_device *)dev_id; 1053 struct net_device *dev = (struct net_device *)dev_id;
1143 struct c_can_priv *priv = netdev_priv(dev); 1054 struct c_can_priv *priv = netdev_priv(dev);
1144 1055
1145 priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG); 1056 if (!priv->read_reg(priv, C_CAN_INT_REG))
1146 if (!priv->irqstatus)
1147 return IRQ_NONE; 1057 return IRQ_NONE;
1148 1058
1149 /* disable all interrupts and schedule the NAPI */ 1059 /* disable all interrupts and schedule the NAPI */
1150 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); 1060 c_can_irq_control(priv, false);
1151 napi_schedule(&priv->napi); 1061 napi_schedule(&priv->napi);
1152 1062
1153 return IRQ_HANDLED; 1063 return IRQ_HANDLED;
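The interrupt handler is reduced to the canonical NAPI handshake: mask the device, schedule the softirq, and let c_can_poll() unmask only once it finishes under budget (and only when not bus-off, per the hunk above). A generic sketch of that contract:

    static irqreturn_t sketch_isr(int irq, void *dev_id)
    {
            struct net_device *dev = dev_id;
            struct c_can_priv *priv = netdev_priv(dev);

            c_can_irq_control(priv, false); /* mask: stop the IRQ refiring */
            napi_schedule(&priv->napi);     /* defer real work to softirq */
            return IRQ_HANDLED;
    }

Keeping the controller masked while bus-off prevents a dead bus from re-arming a storm of status interrupts.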
@@ -1184,6 +1094,8 @@ static int c_can_open(struct net_device *dev)
1184 can_led_event(dev, CAN_LED_EVENT_OPEN); 1094 can_led_event(dev, CAN_LED_EVENT_OPEN);
1185 1095
1186 napi_enable(&priv->napi); 1096 napi_enable(&priv->napi);
1097 /* enable status change, error and module interrupts */
1098 c_can_irq_control(priv, true);
1187 netif_start_queue(dev); 1099 netif_start_queue(dev);
1188 1100
1189 return 0; 1101 return 0;
@@ -1226,7 +1138,6 @@ struct net_device *alloc_c_can_dev(void)
1226 return NULL; 1138 return NULL;
1227 1139
1228 priv = netdev_priv(dev); 1140 priv = netdev_priv(dev);
1229 spin_lock_init(&priv->xmit_lock);
1230 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT); 1141 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
1231 1142
1232 priv->dev = dev; 1143 priv->dev = dev;
@@ -1281,6 +1192,7 @@ int c_can_power_up(struct net_device *dev)
1281 u32 val; 1192 u32 val;
1282 unsigned long time_out; 1193 unsigned long time_out;
1283 struct c_can_priv *priv = netdev_priv(dev); 1194 struct c_can_priv *priv = netdev_priv(dev);
1195 int ret;
1284 1196
1285 if (!(dev->flags & IFF_UP)) 1197 if (!(dev->flags & IFF_UP))
1286 return 0; 1198 return 0;
@@ -1307,7 +1219,11 @@ int c_can_power_up(struct net_device *dev)
1307 if (time_after(jiffies, time_out)) 1219 if (time_after(jiffies, time_out))
1308 return -ETIMEDOUT; 1220 return -ETIMEDOUT;
1309 1221
1310 return c_can_start(dev); 1222 ret = c_can_start(dev);
1223 if (!ret)
1224 c_can_irq_control(priv, true);
1225
1226 return ret;
1311} 1227}
1312EXPORT_SYMBOL_GPL(c_can_power_up); 1228EXPORT_SYMBOL_GPL(c_can_power_up);
1313#endif 1229#endif
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index faa8404162b3..c56f1b1c11ca 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,14 +22,6 @@
22#ifndef C_CAN_H 22#ifndef C_CAN_H
23#define C_CAN_H 23#define C_CAN_H
24 24
25/*
26 * IFx register masks:
27 * allow easy operation on 16-bit registers when the
28 * argument is 32-bit instead
29 */
30#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
31#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
32
33/* message object split */ 25/* message object split */
34#define C_CAN_NO_OF_OBJECTS 32 26#define C_CAN_NO_OF_OBJECTS 32
35#define C_CAN_MSG_OBJ_RX_NUM 16 27#define C_CAN_MSG_OBJ_RX_NUM 16
@@ -45,8 +37,6 @@
45 37
46#define C_CAN_MSG_OBJ_RX_SPLIT 9 38#define C_CAN_MSG_OBJ_RX_SPLIT 9
47#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1) 39#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
48
49#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
50#define RECEIVE_OBJECT_BITS 0x0000ffff 40#define RECEIVE_OBJECT_BITS 0x0000ffff
51 41
52enum reg { 42enum reg {
@@ -183,23 +173,20 @@ struct c_can_priv {
183 struct napi_struct napi; 173 struct napi_struct napi;
184 struct net_device *dev; 174 struct net_device *dev;
185 struct device *device; 175 struct device *device;
186 spinlock_t xmit_lock; 176 atomic_t tx_active;
187 int tx_object; 177 unsigned long tx_dir;
188 int current_status;
189 int last_status; 178 int last_status;
190 u16 (*read_reg) (struct c_can_priv *priv, enum reg index); 179 u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
191 void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val); 180 void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
192 void __iomem *base; 181 void __iomem *base;
193 const u16 *regs; 182 const u16 *regs;
194 unsigned long irq_flags; /* for request_irq() */
195 unsigned int tx_next;
196 unsigned int tx_echo;
197 void *priv; /* for board-specific data */ 183 void *priv; /* for board-specific data */
198 u16 irqstatus;
199 enum c_can_dev_id type; 184 enum c_can_dev_id type;
200 u32 __iomem *raminit_ctrlreg; 185 u32 __iomem *raminit_ctrlreg;
201 unsigned int instance; 186 int instance;
202 void (*raminit) (const struct c_can_priv *priv, bool enable); 187 void (*raminit) (const struct c_can_priv *priv, bool enable);
188 u32 comm_rcv_high;
189 u32 rxmasked;
203 u32 dlc[C_CAN_MSG_OBJ_TX_NUM]; 190 u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
204}; 191};
205 192
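The xmit_lock/tx_next/tx_echo bookkeeping gives way to a single atomic_t tx_active bitmask of in-flight TX message objects. The transmit path itself is not part of these hunks, so the following is only a hedged sketch of how such a mask is typically driven, assuming slots are claimed and retired strictly in order so the mask stays contiguous:

    /* claim: next free slot is the first bit above the highest set one */
    u32 idx = fls(atomic_read(&priv->tx_active));   /* 0 when mask empty */

    atomic_add(BIT(idx), &priv->tx_active);         /* mark in flight */
    /* ... program TX message object 'idx' and request transmission ... */

    /* retire, on in-order TX completion: */
    atomic_sub(BIT(idx), &priv->tx_active);         /* slot free again */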
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index bce0be54c2f5..fe5f6303b584 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -84,8 +84,11 @@ static int c_can_pci_probe(struct pci_dev *pdev,
84 goto out_disable_device; 84 goto out_disable_device;
85 } 85 }
86 86
87 pci_set_master(pdev); 87 ret = pci_enable_msi(pdev);
88 pci_enable_msi(pdev); 88 if (!ret) {
89 dev_info(&pdev->dev, "MSI enabled\n");
90 pci_set_master(pdev);
91 }
89 92
90 addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); 93 addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
91 if (!addr) { 94 if (!addr) {
@@ -132,6 +135,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
132 goto out_free_c_can; 135 goto out_free_c_can;
133 } 136 }
134 137
138 priv->type = c_can_pci_data->type;
139
135 /* Configure access to registers */ 140 /* Configure access to registers */
136 switch (c_can_pci_data->reg_align) { 141 switch (c_can_pci_data->reg_align) {
137 case C_CAN_REG_ALIGN_32: 142 case C_CAN_REG_ALIGN_32:
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 806d92753427..1df0b322d1e4 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -222,7 +222,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
222 222
223 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 223 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
224 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); 224 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
225 if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0) 225 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
226 dev_info(&pdev->dev, "control memory is not used for raminit\n"); 226 dev_info(&pdev->dev, "control memory is not used for raminit\n");
227 else 227 else
228 priv->raminit = c_can_hw_raminit; 228 priv->raminit = c_can_hw_raminit;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c7a260478749..e318e87e2bfc 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -256,7 +256,7 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
256 256
257 /* Check if the CAN device has bit-timing parameters */ 257 /* Check if the CAN device has bit-timing parameters */
258 if (!btc) 258 if (!btc)
259 return -ENOTSUPP; 259 return -EOPNOTSUPP;
260 260
261 /* 261 /*
262 * Depending on the given can_bittiming parameter structure the CAN 262 * Depending on the given can_bittiming parameter structure the CAN
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index c540e3d12e3d..564933ae218c 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
551{ 551{
552 struct sja1000_priv *priv; 552 struct sja1000_priv *priv;
553 struct peak_pci_chan *chan; 553 struct peak_pci_chan *chan;
554 struct net_device *dev; 554 struct net_device *dev, *prev_dev;
555 void __iomem *cfg_base, *reg_base; 555 void __iomem *cfg_base, *reg_base;
556 u16 sub_sys_id, icr; 556 u16 sub_sys_id, icr;
557 int i, err, channels; 557 int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
688 writew(0x0, cfg_base + PITA_ICR + 2); 688 writew(0x0, cfg_base + PITA_ICR + 2);
689 689
690 chan = NULL; 690 chan = NULL;
691 for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) { 691 for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
692 unregister_sja1000dev(dev);
693 free_sja1000dev(dev);
694 priv = netdev_priv(dev); 692 priv = netdev_priv(dev);
695 chan = priv->priv; 693 chan = priv->priv;
694 prev_dev = chan->prev_dev;
695
696 unregister_sja1000dev(dev);
697 free_sja1000dev(dev);
696 } 698 }
697 699
698 /* free any PCIeC resources too */ 700 /* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
726 728
727 /* Loop over all registered devices */ 729 /* Loop over all registered devices */
728 while (1) { 730 while (1) {
731 struct net_device *prev_dev = chan->prev_dev;
732
729 dev_info(&pdev->dev, "removing device %s\n", dev->name); 733 dev_info(&pdev->dev, "removing device %s\n", dev->name);
730 unregister_sja1000dev(dev); 734 unregister_sja1000dev(dev);
731 free_sja1000dev(dev); 735 free_sja1000dev(dev);
732 dev = chan->prev_dev; 736 dev = prev_dev;
733 737
734 if (!dev) { 738 if (!dev) {
735 /* do that only for first channel */ 739 /* do that only for first channel */
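Both hunks in this file are the same fix: the loop link (chan->prev_dev) lives inside memory that unregister/free tears down, so it must be read into a local before the node dies. The pattern reduced to a sketch (struct node and destroy() are hypothetical):

    struct node { struct node *prev; };     /* hypothetical chain node */
    void destroy(struct node *n);           /* hypothetical teardown */

    static void teardown_chain(struct node *n)
    {
            while (n) {
                    struct node *prev = n->prev; /* save link before free */

                    destroy(n);     /* 'n' must not be touched after this */
                    n = prev;
            }
    }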
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index df136a2516c4..014695d7e6a3 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -46,6 +46,7 @@ static int clk[MAXDEV];
46static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 46static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
47static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 47static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
48static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; 48static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
49static spinlock_t indirect_lock[MAXDEV]; /* lock for indirect access mode */
49 50
50module_param_array(port, ulong, NULL, S_IRUGO); 51module_param_array(port, ulong, NULL, S_IRUGO);
51MODULE_PARM_DESC(port, "I/O port number"); 52MODULE_PARM_DESC(port, "I/O port number");
@@ -101,19 +102,26 @@ static void sja1000_isa_port_write_reg(const struct sja1000_priv *priv,
101static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv, 102static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv,
102 int reg) 103 int reg)
103{ 104{
104 unsigned long base = (unsigned long)priv->reg_base; 105 unsigned long flags, base = (unsigned long)priv->reg_base;
106 u8 readval;
105 107
108 spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
106 outb(reg, base); 109 outb(reg, base);
107 return inb(base + 1); 110 readval = inb(base + 1);
111 spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
112
113 return readval;
108} 114}
109 115
110static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv, 116static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
111 int reg, u8 val) 117 int reg, u8 val)
112{ 118{
113 unsigned long base = (unsigned long)priv->reg_base; 119 unsigned long flags, base = (unsigned long)priv->reg_base;
114 120
121 spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
115 outb(reg, base); 122 outb(reg, base);
116 outb(val, base + 1); 123 outb(val, base + 1);
124 spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
117} 125}
118 126
119static int sja1000_isa_probe(struct platform_device *pdev) 127static int sja1000_isa_probe(struct platform_device *pdev)
@@ -169,6 +177,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
169 if (iosize == SJA1000_IOSIZE_INDIRECT) { 177 if (iosize == SJA1000_IOSIZE_INDIRECT) {
170 priv->read_reg = sja1000_isa_port_read_reg_indirect; 178 priv->read_reg = sja1000_isa_port_read_reg_indirect;
171 priv->write_reg = sja1000_isa_port_write_reg_indirect; 179 priv->write_reg = sja1000_isa_port_write_reg_indirect;
180 spin_lock_init(&indirect_lock[idx]);
172 } else { 181 } else {
173 priv->read_reg = sja1000_isa_port_read_reg; 182 priv->read_reg = sja1000_isa_port_read_reg;
174 priv->write_reg = sja1000_isa_port_write_reg; 183 priv->write_reg = sja1000_isa_port_write_reg;
@@ -198,6 +207,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
198 207
199 platform_set_drvdata(pdev, dev); 208 platform_set_drvdata(pdev, dev);
200 SET_NETDEV_DEV(dev, &pdev->dev); 209 SET_NETDEV_DEV(dev, &pdev->dev);
210 dev->dev_id = idx;
201 211
202 err = register_sja1000dev(dev); 212 err = register_sja1000dev(dev);
203 if (err) { 213 if (err) {
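The indirect window is a two-cycle sequence (latch the register index at the base port, then move data at base + 1); two contexts interleaving those cycles would clobber each other's index, hence the new per-device lock held with IRQs disabled. The guarded read in isolation, as a sketch:

    static u8 sketch_read_indirect(unsigned long base, int id, int reg)
    {
            unsigned long flags;
            u8 val;

            spin_lock_irqsave(&indirect_lock[id], flags);
            outb(reg, base);        /* cycle 1: select the register */
            val = inb(base + 1);    /* cycle 2: move the data */
            spin_unlock_irqrestore(&indirect_lock[id], flags);

            return val;
    }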
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f5b16e0e3a12..dcf9196f6316 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -322,13 +322,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
322 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 322 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
323 return; 323 return;
324 324
325 spin_lock(&sl->lock); 325 spin_lock_bh(&sl->lock);
326 if (sl->xleft <= 0) { 326 if (sl->xleft <= 0) {
327 /* Now serial buffer is almost free & we can start 327 /* Now serial buffer is almost free & we can start
328 * transmission of another packet */ 328 * transmission of another packet */
329 sl->dev->stats.tx_packets++; 329 sl->dev->stats.tx_packets++;
330 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 330 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
331 spin_unlock(&sl->lock); 331 spin_unlock_bh(&sl->lock);
332 netif_wake_queue(sl->dev); 332 netif_wake_queue(sl->dev);
333 return; 333 return;
334 } 334 }
@@ -336,7 +336,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
336 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 336 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
337 sl->xleft -= actual; 337 sl->xleft -= actual;
338 sl->xhead += actual; 338 sl->xhead += actual;
339 spin_unlock(&sl->lock); 339 spin_unlock_bh(&sl->lock);
340} 340}
341 341
342/* Send a can_frame to a TTY queue. */ 342/* Send a can_frame to a TTY queue. */
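The plain spin_lock variants were unsafe here because sl->lock is also taken from the transmit path, which runs in softirq context; if the wakeup holds the lock in process context and the xmit softirq fires on the same CPU, it spins on the lock forever. This is the assumed deadlock the _bh variants close, sketched as a comment:

    /*
     * CPU0, process context:        CPU0, softirq (xmit path):
     *   spin_lock(&sl->lock);
     *      <-- softirq preempts -->
     *                                 spin_lock(&sl->lock); // spins forever
     *
     * spin_lock_bh() first disables local softirqs, so the xmit
     * path cannot preempt the lock holder on the same CPU.
     */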
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39b26fe28d10..d7401017a3f1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
35source "drivers/net/ethernet/chelsio/Kconfig" 35source "drivers/net/ethernet/chelsio/Kconfig"
36source "drivers/net/ethernet/cirrus/Kconfig" 36source "drivers/net/ethernet/cirrus/Kconfig"
37source "drivers/net/ethernet/cisco/Kconfig" 37source "drivers/net/ethernet/cisco/Kconfig"
38
39config CX_ECAT
40 tristate "Beckhoff CX5020 EtherCAT master support"
41 depends on PCI
42 ---help---
43 Driver for the EtherCAT master module located on the CCAT FPGA
44 that can be found on the Beckhoff CX5020 and possibly other
45 Beckhoff CX series industrial PCs.
46
47 To compile this driver as a module, choose M here. The module
48 will be called ec_bhf.
49
38source "drivers/net/ethernet/davicom/Kconfig" 50source "drivers/net/ethernet/davicom/Kconfig"
39 51
40config DNET 52config DNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 545d0b3b9cb4..35190e36c456 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
21obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ 21obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
22obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ 22obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
23obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ 23obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
24obj-$(CONFIG_CX_ECAT) += ec_bhf.o
24obj-$(CONFIG_DM9000) += davicom/ 25obj-$(CONFIG_DM9000) += davicom/
25obj-$(CONFIG_DNET) += dnet.o 26obj-$(CONFIG_DNET) += dnet.o
26obj-$(CONFIG_NET_VENDOR_DEC) += dec/ 27obj-$(CONFIG_NET_VENDOR_DEC) += dec/
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
index 80c1ab74a4b8..fdddba51473e 100644
--- a/drivers/net/ethernet/altera/Kconfig
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -1,5 +1,6 @@
1config ALTERA_TSE 1config ALTERA_TSE
2 tristate "Altera Triple-Speed Ethernet MAC support" 2 tristate "Altera Triple-Speed Ethernet MAC support"
3 depends on HAS_DMA
3 select PHYLIB 4 select PHYLIB
4 ---help--- 5 ---help---
5 This driver supports the Altera Triple-Speed (TSE) Ethernet MAC. 6 This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index d4a187e45369..3eff2fd3997e 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,3 +5,4 @@
5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o 5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ 6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
7altera_msgdma.o altera_sgdma.o altera_utils.o 7altera_msgdma.o altera_sgdma.o altera_utils.o
8ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 3df18669ea30..0fb986ba3290 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -18,6 +18,7 @@
18#include "altera_utils.h" 18#include "altera_utils.h"
19#include "altera_tse.h" 19#include "altera_tse.h"
20#include "altera_msgdmahw.h" 20#include "altera_msgdmahw.h"
21#include "altera_msgdma.h"
21 22
22/* No initialization work to do for MSGDMA */ 23/* No initialization work to do for MSGDMA */
23int msgdma_initialize(struct altera_tse_private *priv) 24int msgdma_initialize(struct altera_tse_private *priv)
@@ -29,21 +30,23 @@ void msgdma_uninitialize(struct altera_tse_private *priv)
29{ 30{
30} 31}
31 32
33void msgdma_start_rxdma(struct altera_tse_private *priv)
34{
35}
36
32void msgdma_reset(struct altera_tse_private *priv) 37void msgdma_reset(struct altera_tse_private *priv)
33{ 38{
34 int counter; 39 int counter;
35 struct msgdma_csr *txcsr =
36 (struct msgdma_csr *)priv->tx_dma_csr;
37 struct msgdma_csr *rxcsr =
38 (struct msgdma_csr *)priv->rx_dma_csr;
39 40
40 /* Reset Rx mSGDMA */ 41 /* Reset Rx mSGDMA */
41 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); 42 csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
42 iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control); 43 msgdma_csroffs(status));
44 csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
45 msgdma_csroffs(control));
43 46
44 counter = 0; 47 counter = 0;
45 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 48 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
46 if (tse_bit_is_clear(&rxcsr->status, 49 if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
47 MSGDMA_CSR_STAT_RESETTING)) 50 MSGDMA_CSR_STAT_RESETTING))
48 break; 51 break;
49 udelay(1); 52 udelay(1);
@@ -54,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
54 "TSE Rx mSGDMA resetting bit never cleared!\n"); 57 "TSE Rx mSGDMA resetting bit never cleared!\n");
55 58
56 /* clear all status bits */ 59 /* clear all status bits */
57 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); 60 csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
58 61
59 /* Reset Tx mSGDMA */ 62 /* Reset Tx mSGDMA */
60 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); 63 csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
61 iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control); 64 msgdma_csroffs(status));
65
66 csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
67 msgdma_csroffs(control));
62 68
63 counter = 0; 69 counter = 0;
64 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 70 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
65 if (tse_bit_is_clear(&txcsr->status, 71 if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
66 MSGDMA_CSR_STAT_RESETTING)) 72 MSGDMA_CSR_STAT_RESETTING))
67 break; 73 break;
68 udelay(1); 74 udelay(1);
@@ -73,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
73 "TSE Tx mSGDMA resetting bit never cleared!\n"); 79 "TSE Tx mSGDMA resetting bit never cleared!\n");
74 80
75 /* clear all status bits */ 81 /* clear all status bits */
76 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); 82 csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
77} 83}
78 84
79void msgdma_disable_rxirq(struct altera_tse_private *priv) 85void msgdma_disable_rxirq(struct altera_tse_private *priv)
80{ 86{
81 struct msgdma_csr *csr = priv->rx_dma_csr; 87 tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
82 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 88 MSGDMA_CSR_CTL_GLOBAL_INTR);
83} 89}
84 90
85void msgdma_enable_rxirq(struct altera_tse_private *priv) 91void msgdma_enable_rxirq(struct altera_tse_private *priv)
86{ 92{
87 struct msgdma_csr *csr = priv->rx_dma_csr; 93 tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
88 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 94 MSGDMA_CSR_CTL_GLOBAL_INTR);
89} 95}
90 96
91void msgdma_disable_txirq(struct altera_tse_private *priv) 97void msgdma_disable_txirq(struct altera_tse_private *priv)
92{ 98{
93 struct msgdma_csr *csr = priv->tx_dma_csr; 99 tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
94 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 100 MSGDMA_CSR_CTL_GLOBAL_INTR);
95} 101}
96 102
97void msgdma_enable_txirq(struct altera_tse_private *priv) 103void msgdma_enable_txirq(struct altera_tse_private *priv)
98{ 104{
99 struct msgdma_csr *csr = priv->tx_dma_csr; 105 tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
100 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 106 MSGDMA_CSR_CTL_GLOBAL_INTR);
101} 107}
102 108
103void msgdma_clear_rxirq(struct altera_tse_private *priv) 109void msgdma_clear_rxirq(struct altera_tse_private *priv)
104{ 110{
105 struct msgdma_csr *csr = priv->rx_dma_csr; 111 csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
106 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
107} 112}
108 113
109void msgdma_clear_txirq(struct altera_tse_private *priv) 114void msgdma_clear_txirq(struct altera_tse_private *priv)
110{ 115{
111 struct msgdma_csr *csr = priv->tx_dma_csr; 116 csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
112 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
113} 117}
114 118
115/* return 0 to indicate transmit is pending */ 119/* return 0 to indicate transmit is pending */
116int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 120int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
117{ 121{
118 struct msgdma_extended_desc *desc = priv->tx_dma_desc; 122 csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
119 123 msgdma_descroffs(read_addr_lo));
120 iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo); 124 csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
121 iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi); 125 msgdma_descroffs(read_addr_hi));
122 iowrite32(0, &desc->write_addr_lo); 126 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
123 iowrite32(0, &desc->write_addr_hi); 127 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
124 iowrite32(buffer->len, &desc->len); 128 csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
125 iowrite32(0, &desc->burst_seq_num); 129 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
126 iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride); 130 csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
127 iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control); 131 msgdma_descroffs(stride));
132 csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
133 msgdma_descroffs(control));
128 return 0; 134 return 0;
129} 135}
130 136
@@ -133,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
133 u32 ready = 0; 139 u32 ready = 0;
134 u32 inuse; 140 u32 inuse;
135 u32 status; 141 u32 status;
136 struct msgdma_csr *txcsr =
137 (struct msgdma_csr *)priv->tx_dma_csr;
138 142
139 /* Get number of sent descriptors */ 143 /* Get number of sent descriptors */
140 inuse = ioread32(&txcsr->rw_fill_level) & 0xffff; 144 inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
145 & 0xffff;
141 146
142 if (inuse) { /* Tx FIFO is not empty */ 147 if (inuse) { /* Tx FIFO is not empty */
143 ready = priv->tx_prod - priv->tx_cons - inuse - 1; 148 ready = priv->tx_prod - priv->tx_cons - inuse - 1;
144 } else { 149 } else {
145 /* Check for buffered last packet */ 150 /* Check for buffered last packet */
146 status = ioread32(&txcsr->status); 151 status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
147 if (status & MSGDMA_CSR_STAT_BUSY) 152 if (status & MSGDMA_CSR_STAT_BUSY)
148 ready = priv->tx_prod - priv->tx_cons - 1; 153 ready = priv->tx_prod - priv->tx_cons - 1;
149 else 154 else
@@ -154,10 +159,9 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
154 159
155/* Put buffer to the mSGDMA RX FIFO 160/* Put buffer to the mSGDMA RX FIFO
156 */ 161 */
157int msgdma_add_rx_desc(struct altera_tse_private *priv, 162void msgdma_add_rx_desc(struct altera_tse_private *priv,
158 struct tse_buffer *rxbuffer) 163 struct tse_buffer *rxbuffer)
159{ 164{
160 struct msgdma_extended_desc *desc = priv->rx_dma_desc;
161 u32 len = priv->rx_dma_buf_sz; 165 u32 len = priv->rx_dma_buf_sz;
162 dma_addr_t dma_addr = rxbuffer->dma_addr; 166 dma_addr_t dma_addr = rxbuffer->dma_addr;
163 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP 167 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -167,15 +171,16 @@ int msgdma_add_rx_desc(struct altera_tse_private *priv,
167 | MSGDMA_DESC_CTL_TR_ERR_IRQ 171 | MSGDMA_DESC_CTL_TR_ERR_IRQ
168 | MSGDMA_DESC_CTL_GO); 172 | MSGDMA_DESC_CTL_GO);
169 173
170 iowrite32(0, &desc->read_addr_lo); 174 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
171 iowrite32(0, &desc->read_addr_hi); 175 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
172 iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo); 176 csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
173 iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi); 177 msgdma_descroffs(write_addr_lo));
174 iowrite32(len, &desc->len); 178 csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
175 iowrite32(0, &desc->burst_seq_num); 179 msgdma_descroffs(write_addr_hi));
176 iowrite32(0x00010001, &desc->stride); 180 csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
177 iowrite32(control, &desc->control); 181 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
178 return 1; 182 csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
183 csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
179} 184}
180 185
181/* status is returned on upper 16 bits, 186/* status is returned on upper 16 bits,
@@ -186,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
186 u32 rxstatus = 0; 191 u32 rxstatus = 0;
187 u32 pktlength; 192 u32 pktlength;
188 u32 pktstatus; 193 u32 pktstatus;
189 struct msgdma_csr *rxcsr = 194
190 (struct msgdma_csr *)priv->rx_dma_csr; 195 if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
191 struct msgdma_response *rxresp = 196 & 0xffff) {
192 (struct msgdma_response *)priv->rx_dma_resp; 197 pktlength = csrrd32(priv->rx_dma_resp,
193 198 msgdma_respoffs(bytes_transferred));
194 if (ioread32(&rxcsr->resp_fill_level) & 0xffff) { 199 pktstatus = csrrd32(priv->rx_dma_resp,
195 pktlength = ioread32(&rxresp->bytes_transferred); 200 msgdma_respoffs(status));
196 pktstatus = ioread32(&rxresp->status);
197 rxstatus = pktstatus; 201 rxstatus = pktstatus;
198 rxstatus = rxstatus << 16; 202 rxstatus = rxstatus << 16;
199 rxstatus |= (pktlength & 0xffff); 203 rxstatus |= (pktlength & 0xffff);
diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h
index 7f0f5bf2bba2..42cf61c81057 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.h
+++ b/drivers/net/ethernet/altera/altera_msgdma.h
@@ -25,10 +25,11 @@ void msgdma_disable_txirq(struct altera_tse_private *);
25void msgdma_clear_rxirq(struct altera_tse_private *); 25void msgdma_clear_rxirq(struct altera_tse_private *);
26void msgdma_clear_txirq(struct altera_tse_private *); 26void msgdma_clear_txirq(struct altera_tse_private *);
27u32 msgdma_tx_completions(struct altera_tse_private *); 27u32 msgdma_tx_completions(struct altera_tse_private *);
28int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *); 28void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
29int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *); 29int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
30u32 msgdma_rx_status(struct altera_tse_private *); 30u32 msgdma_rx_status(struct altera_tse_private *);
31int msgdma_initialize(struct altera_tse_private *); 31int msgdma_initialize(struct altera_tse_private *);
32void msgdma_uninitialize(struct altera_tse_private *); 32void msgdma_uninitialize(struct altera_tse_private *);
33void msgdma_start_rxdma(struct altera_tse_private *);
33 34
34#endif /* __ALTERA_MSGDMA_H__ */ 35#endif /* __ALTERA_MSGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index d7b59ba4019c..e335626e1b6b 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -17,15 +17,6 @@
17#ifndef __ALTERA_MSGDMAHW_H__ 17#ifndef __ALTERA_MSGDMAHW_H__
18#define __ALTERA_MSGDMAHW_H__ 18#define __ALTERA_MSGDMAHW_H__
19 19
20/* mSGDMA standard descriptor format
21 */
22struct msgdma_desc {
23 u32 read_addr; /* data buffer source address */
24 u32 write_addr; /* data buffer destination address */
25 u32 len; /* the number of bytes to transfer per descriptor */
26 u32 control; /* characteristics of the transfer */
27};
28
29/* mSGDMA extended descriptor format 20/* mSGDMA extended descriptor format
30 */ 21 */
31struct msgdma_extended_desc { 22struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
159 u32 status; 150 u32 status;
160}; 151};
161 152
153#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
154#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
155#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
156
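Throughout the Altera hunks, iowrite32(val, &csr->field) struct-overlay accesses become csrwr32(val, base, msgdma_csroffs(field)) and friends, which keeps the base pointers __iomem-clean for sparse. The csrrd32/csrwr32 helpers are assumed to be thin readl/writel wrappers of roughly this shape (the real definitions live elsewhere in the driver):

    static inline u32 csrrd32(void __iomem *base, size_t offs)
    {
            return readl(base + offs);  /* 32-bit CSR at base + offs */
    }

    static inline void csrwr32(u32 val, void __iomem *base, size_t offs)
    {
            writel(val, base + offs);
    }

    /* usage, replacing iowrite32(v, &csr->control): */
    csrwr32(v, priv->rx_dma_csr, msgdma_csroffs(control));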
162/* mSGDMA response register bit definitions 157/* mSGDMA response register bit definitions
163 */ 158 */
164#define MSGDMA_RESP_EARLY_TERM BIT(8) 159#define MSGDMA_RESP_EARLY_TERM BIT(8)
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 0ee96639ae44..99cc56f451cf 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,28 +20,28 @@
20#include "altera_sgdmahw.h" 20#include "altera_sgdmahw.h"
21#include "altera_sgdma.h" 21#include "altera_sgdma.h"
22 22
23static void sgdma_descrip(struct sgdma_descrip *desc, 23static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
24 struct sgdma_descrip *ndesc, 24 struct sgdma_descrip __iomem *ndesc,
25 dma_addr_t ndesc_phys, 25 dma_addr_t ndesc_phys,
26 dma_addr_t raddr, 26 dma_addr_t raddr,
27 dma_addr_t waddr, 27 dma_addr_t waddr,
28 u16 length, 28 u16 length,
29 int generate_eop, 29 int generate_eop,
30 int rfixed, 30 int rfixed,
31 int wfixed); 31 int wfixed);
32 32
33static int sgdma_async_write(struct altera_tse_private *priv, 33static int sgdma_async_write(struct altera_tse_private *priv,
34 struct sgdma_descrip *desc); 34 struct sgdma_descrip __iomem *desc);
35 35
36static int sgdma_async_read(struct altera_tse_private *priv); 36static int sgdma_async_read(struct altera_tse_private *priv);
37 37
38static dma_addr_t 38static dma_addr_t
39sgdma_txphysaddr(struct altera_tse_private *priv, 39sgdma_txphysaddr(struct altera_tse_private *priv,
40 struct sgdma_descrip *desc); 40 struct sgdma_descrip __iomem *desc);
41 41
42static dma_addr_t 42static dma_addr_t
43sgdma_rxphysaddr(struct altera_tse_private *priv, 43sgdma_rxphysaddr(struct altera_tse_private *priv,
44 struct sgdma_descrip *desc); 44 struct sgdma_descrip __iomem *desc);
45 45
46static int sgdma_txbusy(struct altera_tse_private *priv); 46static int sgdma_txbusy(struct altera_tse_private *priv);
47 47
@@ -64,18 +64,23 @@ queue_rx_peekhead(struct altera_tse_private *priv);
64 64
65int sgdma_initialize(struct altera_tse_private *priv) 65int sgdma_initialize(struct altera_tse_private *priv)
66{ 66{
67 priv->txctrlreg = SGDMA_CTRLREG_ILASTD; 67 priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
68 SGDMA_CTRLREG_INTEN;
68 69
69 priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP | 70 priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
71 SGDMA_CTRLREG_INTEN |
70 SGDMA_CTRLREG_ILASTD; 72 SGDMA_CTRLREG_ILASTD;
71 73
74 priv->sgdmadesclen = sizeof(struct sgdma_descrip);
75
72 INIT_LIST_HEAD(&priv->txlisthd); 76 INIT_LIST_HEAD(&priv->txlisthd);
73 INIT_LIST_HEAD(&priv->rxlisthd); 77 INIT_LIST_HEAD(&priv->rxlisthd);
74 78
75 priv->rxdescphys = (dma_addr_t) 0; 79 priv->rxdescphys = (dma_addr_t) 0;
76 priv->txdescphys = (dma_addr_t) 0; 80 priv->txdescphys = (dma_addr_t) 0;
77 81
78 priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc, 82 priv->rxdescphys = dma_map_single(priv->device,
83 (void __force *)priv->rx_dma_desc,
79 priv->rxdescmem, DMA_BIDIRECTIONAL); 84 priv->rxdescmem, DMA_BIDIRECTIONAL);
80 85
81 if (dma_mapping_error(priv->device, priv->rxdescphys)) { 86 if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -84,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
84 return -EINVAL; 89 return -EINVAL;
85 } 90 }
86 91
87 priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc, 92 priv->txdescphys = dma_map_single(priv->device,
93 (void __force *)priv->tx_dma_desc,
88 priv->txdescmem, DMA_TO_DEVICE); 94 priv->txdescmem, DMA_TO_DEVICE);
89 95
90 if (dma_mapping_error(priv->device, priv->txdescphys)) { 96 if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -93,6 +99,16 @@ int sgdma_initialize(struct altera_tse_private *priv)
93 return -EINVAL; 99 return -EINVAL;
94 } 100 }
95 101
102 /* Initialize descriptor memory to all 0's, sync memory to cache */
103 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
104 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
105
106 dma_sync_single_for_device(priv->device, priv->txdescphys,
107 priv->txdescmem, DMA_TO_DEVICE);
108
109 dma_sync_single_for_device(priv->device, priv->rxdescphys,
110 priv->rxdescmem, DMA_TO_DEVICE);
111
96 return 0; 112 return 0;
97} 113}
98 114
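The added initialization does two distinct jobs: descriptor RAM is __iomem here, so it must be cleared with memset_io() rather than memset(), and since it was just mapped with dma_map_single() the CPU-side writes have to be flushed with dma_sync_single_for_device() before the DMA engine may fetch them. The general shape, as a sketch:

    memset_io(desc_base, 0, desc_len);  /* iomem: never plain memset() */
    dma_sync_single_for_device(dev, desc_phys, desc_len, DMA_TO_DEVICE);
    /* only now may the device read the descriptor memory */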
@@ -112,58 +128,48 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
112 */ 128 */
113void sgdma_reset(struct altera_tse_private *priv) 129void sgdma_reset(struct altera_tse_private *priv)
114{ 130{
115 u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
116 u32 txdescriplen = priv->txdescmem;
117 u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
118 u32 rxdescriplen = priv->rxdescmem;
119 struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
120 struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
121
122 /* Initialize descriptor memory to 0 */ 131 /* Initialize descriptor memory to 0 */
123 memset(ptxdescripmem, 0, txdescriplen); 132 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
124 memset(prxdescripmem, 0, rxdescriplen); 133 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
125 134
126 iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control); 135 csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
127 iowrite32(0, &ptxsgdma->control); 136 csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
128 137
129 iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control); 138 csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
130 iowrite32(0, &prxsgdma->control); 139 csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
131} 140}
132 141
142/* For SGDMA, interrupts remain enabled after initially enabling,
143 * so there is no need to provide implementations for the abstract
144 * enable and disable hooks.
145 */
146
133void sgdma_enable_rxirq(struct altera_tse_private *priv) 147void sgdma_enable_rxirq(struct altera_tse_private *priv)
134{ 148{
135 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
136 priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
137 tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
138} 149}
139 150
140void sgdma_enable_txirq(struct altera_tse_private *priv) 151void sgdma_enable_txirq(struct altera_tse_private *priv)
141{ 152{
142 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
143 priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
144 tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
145} 153}
146 154
147/* for SGDMA, RX interrupts remain enabled after enabling */
148void sgdma_disable_rxirq(struct altera_tse_private *priv) 155void sgdma_disable_rxirq(struct altera_tse_private *priv)
149{ 156{
150} 157}
151 158
152/* for SGDMA, TX interrupts remain enabled after enabling */
153void sgdma_disable_txirq(struct altera_tse_private *priv) 159void sgdma_disable_txirq(struct altera_tse_private *priv)
154{ 160{
155} 161}
156 162
157void sgdma_clear_rxirq(struct altera_tse_private *priv) 163void sgdma_clear_rxirq(struct altera_tse_private *priv)
158{ 164{
159 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 165 tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
160 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); 166 SGDMA_CTRLREG_CLRINT);
161} 167}
162 168
163void sgdma_clear_txirq(struct altera_tse_private *priv) 169void sgdma_clear_txirq(struct altera_tse_private *priv)
164{ 170{
165 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; 171 tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
166 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); 172 SGDMA_CTRLREG_CLRINT);
167} 173}
168 174
169/* transmits buffer through SGDMA. Returns number of buffers 175/* transmits buffer through SGDMA. Returns number of buffers
@@ -173,28 +179,27 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
173 */ 179 */
174int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 180int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
175{ 181{
176 int pktstx = 0; 182 struct sgdma_descrip __iomem *descbase =
177 struct sgdma_descrip *descbase = 183 (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
178 (struct sgdma_descrip *)priv->tx_dma_desc;
179 184
180 struct sgdma_descrip *cdesc = &descbase[0]; 185 struct sgdma_descrip __iomem *cdesc = &descbase[0];
181 struct sgdma_descrip *ndesc = &descbase[1]; 186 struct sgdma_descrip __iomem *ndesc = &descbase[1];
182 187
183 /* wait 'til the tx sgdma is ready for the next transmit request */ 188 /* wait 'til the tx sgdma is ready for the next transmit request */
184 if (sgdma_txbusy(priv)) 189 if (sgdma_txbusy(priv))
185 return 0; 190 return 0;
186 191
187 sgdma_descrip(cdesc, /* current descriptor */ 192 sgdma_setup_descrip(cdesc, /* current descriptor */
188 ndesc, /* next descriptor */ 193 ndesc, /* next descriptor */
189 sgdma_txphysaddr(priv, ndesc), 194 sgdma_txphysaddr(priv, ndesc),
190 buffer->dma_addr, /* address of packet to xmit */ 195 buffer->dma_addr, /* address of packet to xmit */
191 0, /* write addr 0 for tx dma */ 196 0, /* write addr 0 for tx dma */
192 buffer->len, /* length of packet */ 197 buffer->len, /* length of packet */
193 SGDMA_CONTROL_EOP, /* Generate EOP */ 198 SGDMA_CONTROL_EOP, /* Generate EOP */
194 0, /* read fixed */ 199 0, /* read fixed */
195 SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ 200 SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
196 201
197 pktstx = sgdma_async_write(priv, cdesc); 202 sgdma_async_write(priv, cdesc);
198 203
199 /* enqueue the request to the pending transmit queue */ 204 /* enqueue the request to the pending transmit queue */
200 queue_tx(priv, buffer); 205 queue_tx(priv, buffer);
@@ -208,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
208u32 sgdma_tx_completions(struct altera_tse_private *priv) 213u32 sgdma_tx_completions(struct altera_tse_private *priv)
209{ 214{
210 u32 ready = 0; 215 u32 ready = 0;
211 struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
212 216
213 if (!sgdma_txbusy(priv) && 217 if (!sgdma_txbusy(priv) &&
214 ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) && 218 ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
219 & SGDMA_CONTROL_HW_OWNED) == 0) &&
215 (dequeue_tx(priv))) { 220 (dequeue_tx(priv))) {
216 ready = 1; 221 ready = 1;
217 } 222 }
@@ -219,11 +224,15 @@ u32 sgdma_tx_completions(struct altera_tse_private *priv)
219 return ready; 224 return ready;
220} 225}
221 226
222int sgdma_add_rx_desc(struct altera_tse_private *priv, 227void sgdma_start_rxdma(struct altera_tse_private *priv)
223 struct tse_buffer *rxbuffer) 228{
229 sgdma_async_read(priv);
230}
231
232void sgdma_add_rx_desc(struct altera_tse_private *priv,
233 struct tse_buffer *rxbuffer)
224{ 234{
225 queue_rx(priv, rxbuffer); 235 queue_rx(priv, rxbuffer);
226 return sgdma_async_read(priv);
227} 236}
228 237
229/* status is returned on upper 16 bits, 238/* status is returned on upper 16 bits,
@@ -231,38 +240,62 @@ int sgdma_add_rx_desc(struct altera_tse_private *priv,
231 */ 240 */
232u32 sgdma_rx_status(struct altera_tse_private *priv) 241u32 sgdma_rx_status(struct altera_tse_private *priv)
233{ 242{
234 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 243 struct sgdma_descrip __iomem *base =
235 struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc; 244 (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
236 struct sgdma_descrip *desc = NULL; 245 struct sgdma_descrip __iomem *desc = NULL;
237 int pktsrx;
238 unsigned int rxstatus = 0;
239 unsigned int pktlength = 0;
240 unsigned int pktstatus = 0;
241 struct tse_buffer *rxbuffer = NULL; 246 struct tse_buffer *rxbuffer = NULL;
247 unsigned int rxstatus = 0;
242 248
243 dma_sync_single_for_cpu(priv->device, 249 u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
244 priv->rxdescphys,
245 priv->rxdescmem,
246 DMA_BIDIRECTIONAL);
247 250
248 desc = &base[0]; 251 desc = &base[0];
249 if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) || 252 if (sts & SGDMA_STSREG_EOP) {
250 (desc->status & SGDMA_STATUS_EOP)) { 253 unsigned int pktlength = 0;
251 pktlength = desc->bytes_xferred; 254 unsigned int pktstatus = 0;
252 pktstatus = desc->status & 0x3f; 255 dma_sync_single_for_cpu(priv->device,
253 rxstatus = pktstatus; 256 priv->rxdescphys,
257 priv->sgdmadesclen,
258 DMA_FROM_DEVICE);
259
260 pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
261 pktstatus = csrrd8(desc, sgdma_descroffs(status));
262 rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
254 rxstatus = rxstatus << 16; 263 rxstatus = rxstatus << 16;
255 rxstatus |= (pktlength & 0xffff); 264 rxstatus |= (pktlength & 0xffff);
256 265
257 desc->status = 0; 266 if (rxstatus) {
258 267 csrwr8(0, desc, sgdma_descroffs(status));
259 rxbuffer = dequeue_rx(priv); 268
260 if (rxbuffer == NULL) 269 rxbuffer = dequeue_rx(priv);
270 if (rxbuffer == NULL)
271 netdev_info(priv->dev,
272 "sgdma rx and rx queue empty!\n");
273
274 /* Clear control */
275 csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
276 /* clear status */
277 csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
278
279 /* kick the rx sgdma after reaping this descriptor */
280 sgdma_async_read(priv);
281
282 } else {
283 /* If the SGDMA indicated an end of packet on recv,
284 * then it's expected that the rxstatus from the
285 * descriptor is non-zero - meaning a valid packet
286 * with a nonzero length, or an error has been
287 * indicated. If not, then all we can do is signal
288 * an error and return no packet received. Most likely
289 * there is a system design error, or an error in the
290 * underlying kernel (cache or cache management problem).
291 */
261 netdev_err(priv->dev, 292 netdev_err(priv->dev,
262 "sgdma rx and rx queue empty!\n"); 293 "SGDMA RX Error Info: %x, %x, %x\n",
263 294 sts, csrrd8(desc, sgdma_descroffs(status)),
264 /* kick the rx sgdma after reaping this descriptor */ 295 rxstatus);
265 pktsrx = sgdma_async_read(priv); 296 }
297 } else if (sts == 0) {
298 sgdma_async_read(priv);
266 } 299 }
267 300
268 return rxstatus; 301 return rxstatus;
@@ -270,38 +303,41 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
270 303
271 304
272/* Private functions */ 305/* Private functions */
273static void sgdma_descrip(struct sgdma_descrip *desc, 306static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
274 struct sgdma_descrip *ndesc, 307 struct sgdma_descrip __iomem *ndesc,
275 dma_addr_t ndesc_phys, 308 dma_addr_t ndesc_phys,
276 dma_addr_t raddr, 309 dma_addr_t raddr,
277 dma_addr_t waddr, 310 dma_addr_t waddr,
278 u16 length, 311 u16 length,
279 int generate_eop, 312 int generate_eop,
280 int rfixed, 313 int rfixed,
281 int wfixed) 314 int wfixed)
282{ 315{
283 /* Clear the next descriptor as not owned by hardware */ 316 /* Clear the next descriptor as not owned by hardware */
284 u32 ctrl = ndesc->control; 317
318 u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
285 ctrl &= ~SGDMA_CONTROL_HW_OWNED; 319 ctrl &= ~SGDMA_CONTROL_HW_OWNED;
286 ndesc->control = ctrl; 320 csrwr8(ctrl, ndesc, sgdma_descroffs(control));
287 321
288 ctrl = 0;
289 ctrl = SGDMA_CONTROL_HW_OWNED; 322 ctrl = SGDMA_CONTROL_HW_OWNED;
290 ctrl |= generate_eop; 323 ctrl |= generate_eop;
291 ctrl |= rfixed; 324 ctrl |= rfixed;
292 ctrl |= wfixed; 325 ctrl |= wfixed;
293 326
294 /* Channel is implicitly zero, initialized to 0 by default */ 327 /* Channel is implicitly zero, initialized to 0 by default */
295 328 csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
296 desc->raddr = raddr; 329 csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
297 desc->waddr = waddr; 330
298 desc->next = lower_32_bits(ndesc_phys); 331 csrwr32(0, desc, sgdma_descroffs(pad1));
299 desc->control = ctrl; 332 csrwr32(0, desc, sgdma_descroffs(pad2));
300 desc->status = 0; 333 csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
301 desc->rburst = 0; 334
302 desc->wburst = 0; 335 csrwr8(ctrl, desc, sgdma_descroffs(control));
303 desc->bytes = length; 336 csrwr8(0, desc, sgdma_descroffs(status));
304 desc->bytes_xferred = 0; 337 csrwr8(0, desc, sgdma_descroffs(wburst));
338 csrwr8(0, desc, sgdma_descroffs(rburst));
339 csrwr16(length, desc, sgdma_descroffs(bytes));
340 csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
305} 341}
306 342
307/* If hardware is busy, don't restart async read. 343/* If hardware is busy, don't restart async read.
@@ -312,48 +348,43 @@ static void sgdma_descrip(struct sgdma_descrip *desc,
  */
 static int sgdma_async_read(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	struct sgdma_descrip *descbase =
-		(struct sgdma_descrip *)priv->rx_dma_desc;
+	struct sgdma_descrip __iomem *descbase =
+		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
 
-	struct sgdma_descrip *cdesc = &descbase[0];
-	struct sgdma_descrip *ndesc = &descbase[1];
+	struct sgdma_descrip __iomem *cdesc = &descbase[0];
+	struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
-	unsigned int sts = ioread32(&csr->status);
 	struct tse_buffer *rxbuffer = NULL;
 
 	if (!sgdma_rxbusy(priv)) {
 		rxbuffer = queue_rx_peekhead(priv);
-		if (rxbuffer == NULL)
+		if (rxbuffer == NULL) {
+			netdev_err(priv->dev, "no rx buffers available\n");
 			return 0;
+		}
 
-		sgdma_descrip(cdesc,		/* current descriptor */
-			      ndesc,		/* next descriptor */
-			      sgdma_rxphysaddr(priv, ndesc),
-			      0,		/* read addr 0 for rx dma */
-			      rxbuffer->dma_addr, /* write addr for rx dma */
-			      0,		/* read 'til EOP */
-			      0,		/* EOP: NA for rx dma */
-			      0,		/* read fixed: NA for rx dma */
-			      0);		/* SOP: NA for rx DMA */
-
-		/* clear control and status */
-		iowrite32(0, &csr->control);
-
-		/* If status available, clear those bits */
-		if (sts & 0xf)
-			iowrite32(0xf, &csr->status);
+		sgdma_setup_descrip(cdesc,		/* current descriptor */
+				    ndesc,		/* next descriptor */
+				    sgdma_rxphysaddr(priv, ndesc),
+				    0,			/* read addr 0 for rx dma */
+				    rxbuffer->dma_addr, /* write addr for rx dma */
+				    0,			/* read 'til EOP */
+				    0,			/* EOP: NA for rx dma */
+				    0,			/* read fixed: NA for rx dma */
+				    0);			/* SOP: NA for rx DMA */
 
 		dma_sync_single_for_device(priv->device,
 					   priv->rxdescphys,
-					   priv->rxdescmem,
-					   DMA_BIDIRECTIONAL);
+					   priv->sgdmadesclen,
+					   DMA_TO_DEVICE);
 
-		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
-			  &csr->next_descrip);
+		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+			priv->rx_dma_csr,
+			sgdma_csroffs(next_descrip));
 
-		iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
-			  &csr->control);
+		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+			priv->rx_dma_csr,
+			sgdma_csroffs(control));
 
 		return 1;
 	}
@@ -362,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 }
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-			     struct sgdma_descrip *desc)
+			     struct sgdma_descrip __iomem *desc)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-
 	if (sgdma_txbusy(priv))
 		return 0;
 
 	/* clear control and status */
-	iowrite32(0, &csr->control);
-	iowrite32(0x1f, &csr->status);
+	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
 	dma_sync_single_for_device(priv->device, priv->txdescphys,
-				   priv->txdescmem, DMA_TO_DEVICE);
+				   priv->sgdmadesclen, DMA_TO_DEVICE);
 
-	iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
-		  &csr->next_descrip);
+	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+		priv->tx_dma_csr,
+		sgdma_csroffs(next_descrip));
 
-	iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
-		  &csr->control);
+	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+		priv->tx_dma_csr,
+		sgdma_csroffs(control));
 
 	return 1;
 }
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc)
+		 struct sgdma_descrip __iomem *desc)
 {
 	dma_addr_t paddr = priv->txdescmem_busaddr;
 	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -396,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc)
+		 struct sgdma_descrip __iomem *desc)
 {
 	dma_addr_t paddr = priv->rxdescmem_busaddr;
 	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -485,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
  */
 static int sgdma_rxbusy(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+	       & SGDMA_STSREG_BUSY;
 }
 
 /* waits for the tx sgdma to finish its current operation, returns 0
@@ -495,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
 static int sgdma_txbusy(struct altera_tse_private *priv)
 {
 	int delay = 0;
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
 
 	/* if DMA is busy, wait for current transaction to finish */
-	while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+		& SGDMA_STSREG_BUSY) && (delay++ < 100))
 		udelay(1);
 
-	if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+	    & SGDMA_STSREG_BUSY) {
 		netdev_err(priv->dev, "timeout waiting for tx dma\n");
 		return 1;
 	}
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
index 07d471729dc4..584977e29ef9 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.h
+++ b/drivers/net/ethernet/altera/altera_sgdma.h
@@ -26,10 +26,11 @@ void sgdma_clear_rxirq(struct altera_tse_private *);
 void sgdma_clear_txirq(struct altera_tse_private *);
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
 u32 sgdma_tx_completions(struct altera_tse_private *);
-int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
+void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
 void sgdma_status(struct altera_tse_private *);
 u32 sgdma_rx_status(struct altera_tse_private *);
 int sgdma_initialize(struct altera_tse_private *);
 void sgdma_uninitialize(struct altera_tse_private *);
+void sgdma_start_rxdma(struct altera_tse_private *);
 
 #endif /* __ALTERA_SGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index ba3334f35383..85bc33b218d9 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -19,16 +19,16 @@
 
 /* SGDMA descriptor structure */
 struct sgdma_descrip {
-	unsigned int	raddr; /* address of data to be read */
-	unsigned int	pad1;
-	unsigned int	waddr;
-	unsigned int	pad2;
-	unsigned int	next;
-	unsigned int	pad3;
-	unsigned short	bytes;
-	unsigned char	rburst;
-	unsigned char	wburst;
-	unsigned short	bytes_xferred;	/* 16 bits, bytes xferred */
+	u32	raddr; /* address of data to be read */
+	u32	pad1;
+	u32	waddr;
+	u32	pad2;
+	u32	next;
+	u32	pad3;
+	u16	bytes;
+	u8	rburst;
+	u8	wburst;
+	u16	bytes_xferred;	/* 16 bits, bytes xferred */
 
 	/* bit 0: error
 	 * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
 	 * bit 6: reserved
 	 * bit 7: status eop for recv case
 	 */
-	unsigned char	status;
+	u8	status;
 
 	/* bit 0: eop
 	 * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
 	 * bits 3,4,5,6: Channel (always 0)
 	 * bit 7: hardware owned
 	 */
-	unsigned char	control;
+	u8	control;
 } __packed;
 
 
@@ -101,6 +101,8 @@ struct sgdma_csr {
 	u32	pad3[3];
 };
 
+#define sgdma_csroffs(a)	(offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a)	(offsetof(struct sgdma_descrip, a))
 
 #define SGDMA_STSREG_ERR	BIT(0) /* Error */
 #define SGDMA_STSREG_EOP	BIT(1) /* EOP */
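[Editor's note] The two offsetof() macros above are the pivot of this series: descriptor and CSR fields stop being dereferenced through a __packed struct that overlays device memory and are instead addressed as a base pointer plus a byte offset. A minimal sketch of the resulting access pattern, assuming the csrrd8()/csrwr8() helpers that this patch adds to altera_tse.h further below (illustration only, not part of the diff):

    /* Mark a descriptor as hardware-owned without a direct struct
     * dereference; sgdma_descroffs() turns the field name into a byte
     * offset, so the access goes through readb()/writeb().
     */
    static void descrip_set_hw_owned(struct sgdma_descrip __iomem *desc)
    {
            u8 ctrl = csrrd8(desc, sgdma_descroffs(control));

            ctrl |= SGDMA_CONTROL_HW_OWNED;
            csrwr8(ctrl, desc, sgdma_descroffs(control));
    }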
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 8feeed05de0e..2adb24d4523c 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -58,6 +58,8 @@
 /* MAC function configuration default settings */
 #define ALTERA_TSE_TX_IPG_LENGTH	12
 
+#define ALTERA_TSE_PAUSE_QUANTA		0xffff
+
 #define GET_BIT_VALUE(v, bit)	(((v) >> (bit)) & 0x1)
 
 /* MAC Command_Config Register Bit Definitions
@@ -355,6 +357,8 @@ struct altera_tse_mac {
 	u32	reserved5[42];
 };
 
+#define tse_csroffs(a)	(offsetof(struct altera_tse_mac, a))
+
 /* Transmit and Receive Command Registers Bit Definitions
  */
 #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC		BIT(17)
@@ -390,10 +394,11 @@ struct altera_dmaops {
 	void (*clear_rxirq)(struct altera_tse_private *);
 	int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
 	u32 (*tx_completions)(struct altera_tse_private *);
-	int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
+	void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
 	u32 (*get_rx_status)(struct altera_tse_private *);
 	int (*init_dma)(struct altera_tse_private *);
 	void (*uninit_dma)(struct altera_tse_private *);
+	void (*start_rxdma)(struct altera_tse_private *);
 };
 
 /* This structure is private to each device.
@@ -453,6 +458,7 @@ struct altera_tse_private {
 	u32 rxctrlreg;
 	dma_addr_t rxdescphys;
 	dma_addr_t txdescphys;
+	size_t sgdmadesclen;
 
 	struct list_head txlisthd;
 	struct list_head rxlisthd;
@@ -483,4 +489,49 @@ struct altera_tse_private {
  */
 void altera_tse_set_ethtool_ops(struct net_device *);
 
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writeb(val, paddr);
+}
+
 #endif /* __ALTERA_TSE_H__ */
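[Editor's note] These accessors deliberately funnel every register access through readl()/writel() and friends on a computed byte address; unlike a plain dereference of a packed-struct field, the compiler cannot split, widen, or reorder the MMIO access. A short usage sketch (MAC_CMDCFG_PROMIS_EN is one of the driver's existing command_config bit definitions; the helper itself is illustrative, not from the patch):

    static void tse_enable_promisc(struct altera_tse_private *priv)
    {
            u32 cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));

            cmd |= MAC_CMDCFG_PROMIS_EN;
            csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
    }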
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 319ca74f5e74..54c25eff7952 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -77,7 +77,7 @@ static void tse_get_drvinfo(struct net_device *dev,
 	struct altera_tse_private *priv = netdev_priv(dev);
 	u32 rev = ioread32(&priv->mac_dev->megacore_revision);
 
-	strcpy(info->driver, "Altera TSE MAC IP Driver");
+	strcpy(info->driver, "altera_tse");
 	strcpy(info->version, "v8.0");
 	snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
 		 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
 			   u64 *buf)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	u64 ext;
 
-	buf[0] = ioread32(&mac->frames_transmitted_ok);
-	buf[1] = ioread32(&mac->frames_received_ok);
-	buf[2] = ioread32(&mac->frames_check_sequence_errors);
-	buf[3] = ioread32(&mac->alignment_errors);
+	buf[0] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_transmitted_ok));
+	buf[1] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_received_ok));
+	buf[2] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_check_sequence_errors));
+	buf[3] = csrrd32(priv->mac_dev,
+			 tse_csroffs(alignment_errors));
 
 	/* Extended aOctetsTransmittedOK counter */
-	ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
-	ext |= ioread32(&mac->octets_transmitted_ok);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(octets_transmitted_ok));
 	buf[4] = ext;
 
 	/* Extended aOctetsReceivedOK counter */
-	ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
-	ext |= ioread32(&mac->octets_received_ok);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_octets_received_ok)) << 32;
+
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(octets_received_ok));
 	buf[5] = ext;
 
-	buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
-	buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
-	buf[8] = ioread32(&mac->if_in_errors);
-	buf[9] = ioread32(&mac->if_out_errors);
-	buf[10] = ioread32(&mac->if_in_ucast_pkts);
-	buf[11] = ioread32(&mac->if_in_multicast_pkts);
-	buf[12] = ioread32(&mac->if_in_broadcast_pkts);
-	buf[13] = ioread32(&mac->if_out_discards);
-	buf[14] = ioread32(&mac->if_out_ucast_pkts);
-	buf[15] = ioread32(&mac->if_out_multicast_pkts);
-	buf[16] = ioread32(&mac->if_out_broadcast_pkts);
-	buf[17] = ioread32(&mac->ether_stats_drop_events);
+	buf[6] = csrrd32(priv->mac_dev,
+			 tse_csroffs(tx_pause_mac_ctrl_frames));
+	buf[7] = csrrd32(priv->mac_dev,
+			 tse_csroffs(rx_pause_mac_ctrl_frames));
+	buf[8] = csrrd32(priv->mac_dev,
+			 tse_csroffs(if_in_errors));
+	buf[9] = csrrd32(priv->mac_dev,
+			 tse_csroffs(if_out_errors));
+	buf[10] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_ucast_pkts));
+	buf[11] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_multicast_pkts));
+	buf[12] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_broadcast_pkts));
+	buf[13] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_discards));
+	buf[14] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_ucast_pkts));
+	buf[15] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_multicast_pkts));
+	buf[16] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_broadcast_pkts));
+	buf[17] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_drop_events));
 
 	/* Extended etherStatsOctets counter */
-	ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
-	ext |= ioread32(&mac->ether_stats_octets);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_ether_stats_octets)) << 32;
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(ether_stats_octets));
 	buf[18] = ext;
 
-	buf[19] = ioread32(&mac->ether_stats_pkts);
-	buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
-	buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
-	buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
-	buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
-	buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
-	buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
-	buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
-	buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
-	buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
-	buf[29] = ioread32(&mac->ether_stats_jabbers);
-	buf[30] = ioread32(&mac->ether_stats_fragments);
+	buf[19] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts));
+	buf[20] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_undersize_pkts));
+	buf[21] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_oversize_pkts));
+	buf[22] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_64_octets));
+	buf[23] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_65to127_octets));
+	buf[24] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_128to255_octets));
+	buf[25] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_256to511_octets));
+	buf[26] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_512to1023_octets));
+	buf[27] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_1024to1518_octets));
+	buf[28] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_1519tox_octets));
+	buf[29] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_jabbers));
+	buf[30] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_fragments));
 }
 
 static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,19 +213,24 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 {
 	int i;
 	struct altera_tse_private *priv = netdev_priv(dev);
-	u32 *tse_mac_regs = (u32 *)priv->mac_dev;
 	u32 *buf = regbuf;
 
 	/* Set version to a known value, so ethtool knows
 	 * how to do any special formatting of this data.
 	 * This version number will need to change if and
 	 * when this register table is changed.
+	 *
+	 * version[31:0] = 1: Dump the first 128 TSE Registers
+	 *	Upper bits are all 0 by default
+	 *
+	 * Upper 16-bits will indicate feature presence for
+	 * Ethtool register decoding in future version.
 	 */
 
 	regs->version = 1;
 
 	for (i = 0; i < TSE_NUM_REGS; i++)
-		buf[i] = ioread32(&tse_mac_regs[i]);
+		buf[i] = csrrd32(priv->mac_dev, i * 4);
 }
 
 static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
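[Editor's note] The statistics conversion above keeps the hardware's split-counter layout: each 64-bit counter is exposed as a 32-bit lsb register plus a separate msb_* register, assembled as (msb << 32) | lsb. A condensed sketch of that pattern (field names are those of struct altera_tse_mac; note the two halves are read non-atomically, so a rollover between the reads can momentarily skew the value):

    static u64 tse_read_octets_tx_ok(struct altera_tse_private *priv)
    {
            u64 ext;

            /* upper 32 bits first, then OR in the lower half */
            ext = (u64) csrrd32(priv->mac_dev,
                                tse_csroffs(msb_octets_transmitted_ok)) << 32;
            ext |= csrrd32(priv->mac_dev,
                           tse_csroffs(octets_transmitted_ok));
            return ext;
    }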
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index c70a29e0b9f7..7330681574d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  */
 static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-	struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-	unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
-	u32 data;
+	struct net_device *ndev = bus->priv;
+	struct altera_tse_private *priv = netdev_priv(ndev);
 
 	/* set MDIO address */
-	iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+	csrwr32((mii_id & 0x1f), priv->mac_dev,
+		tse_csroffs(mdio_phy0_addr));
 
 	/* get the data */
-	data = ioread32(&mdio_regs[regnum]) & 0xffff;
-	return data;
+	return csrrd32(priv->mac_dev,
+		       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
 }
 
 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 				 u16 value)
 {
-	struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-	unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+	struct net_device *ndev = bus->priv;
+	struct altera_tse_private *priv = netdev_priv(ndev);
 
 	/* set MDIO address */
-	iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+	csrwr32((mii_id & 0x1f), priv->mac_dev,
+		tse_csroffs(mdio_phy0_addr));
 
 	/* write the data */
-	iowrite32((u32) value, &mdio_regs[regnum]);
+	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
 	return 0;
 }
 
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
 	for (i = 0; i < PHY_MAX_ADDR; i++)
 		mdio->irq[i] = PHY_POLL;
 
-	mdio->priv = priv->mac_dev;
+	mdio->priv = dev;
 	mdio->parent = priv->device;
 
 	ret = of_mdiobus_register(mdio, mdio_node);
@@ -224,6 +225,7 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
 		dev_kfree_skb_any(rxbuffer->skb);
 		return -EINVAL;
 	}
+	rxbuffer->dma_addr &= (dma_addr_t)~3;
 	rxbuffer->len = len;
 	return 0;
 }
@@ -425,9 +427,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
 		priv->dev->stats.rx_bytes += pktlength;
 
 		entry = next_entry;
+
+		tse_rx_refill(priv);
 	}
 
-	tse_rx_refill(priv);
 	return count;
 }
 
@@ -520,7 +523,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 	struct altera_tse_private *priv;
 	unsigned long int flags;
 
-
 	if (unlikely(!dev)) {
 		pr_err("%s: invalid dev pointer\n", __func__);
 		return IRQ_NONE;
@@ -562,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int nopaged_len = skb_headlen(skb);
 	enum netdev_tx ret = NETDEV_TX_OK;
 	dma_addr_t dma_addr;
-	int txcomplete = 0;
 
 	spin_lock_bh(&priv->tx_lock);
 
@@ -598,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_sync_single_for_device(priv->device, buffer->dma_addr,
 				   buffer->len, DMA_TO_DEVICE);
 
-	txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+	priv->dmaops->tx_buffer(priv, buffer);
 
 	skb_tx_timestamp(skb);
 
@@ -697,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
 	struct altera_tse_private *priv = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-	int ret;
 
 	if (priv->phy_addr != POLL_PHY) {
 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -711,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
 			netdev_err(dev, "Could not attach to PHY\n");
 
 	} else {
+		int ret;
 		phydev = phy_find_first(priv->mdio);
 		if (phydev == NULL) {
 			netdev_err(dev, "No PHY found\n");
@@ -790,7 +791,6 @@ static int init_phy(struct net_device *dev)
 
 static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
 	u32 msb;
 	u32 lsb;
 
@@ -798,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 
 	/* Set primary MAC address */
-	iowrite32(msb, &mac->mac_addr_0);
-	iowrite32(lsb, &mac->mac_addr_1);
+	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 }
 
 /* MAC software reset.
@@ -810,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
  */
 static int reset_mac(struct altera_tse_private *priv)
 {
-	void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
 	int counter;
 	u32 dat;
 
-	dat = ioread32(cmd_cfg_reg);
+	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
-	iowrite32(dat, cmd_cfg_reg);
+	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 
 	counter = 0;
 	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+				     MAC_CMDCFG_SW_RESET))
 			break;
 		udelay(1);
 	}
 
 	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		dat = ioread32(cmd_cfg_reg);
+		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 		dat &= ~MAC_CMDCFG_SW_RESET;
-		iowrite32(dat, cmd_cfg_reg);
+		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 		return -1;
 	}
 	return 0;
@@ -839,42 +839,58 @@ static int reset_mac(struct altera_tse_private *priv)
 */
 static int init_mac(struct altera_tse_private *priv)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
 	unsigned int cmd = 0;
 	u32 frm_length;
 
 	/* Setup Rx FIFO */
-	iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
-		  &mac->rx_section_empty);
-	iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
-	iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
-	iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+		priv->mac_dev, tse_csroffs(rx_section_empty));
+
+	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+		tse_csroffs(rx_section_full));
+
+	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+		tse_csroffs(rx_almost_empty));
+
+	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+		tse_csroffs(rx_almost_full));
 
 	/* Setup Tx FIFO */
-	iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
-		  &mac->tx_section_empty);
-	iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
-	iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
-	iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+		priv->mac_dev, tse_csroffs(tx_section_empty));
+
+	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+		tse_csroffs(tx_section_full));
+
+	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+		tse_csroffs(tx_almost_empty));
+
+	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+		tse_csroffs(tx_almost_full));
 
 	/* MAC Address Configuration */
 	tse_update_mac_addr(priv, priv->dev->dev_addr);
 
 	/* MAC Function Configuration */
 	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
-	iowrite32(frm_length, &mac->frm_length);
-	iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+		tse_csroffs(tx_ipg_length));
 
 	/* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
 	 * start address
 	 */
-	tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
-	tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
-				ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 
 	/* Set the MAC options */
-	cmd = ioread32(&mac->command_config);
-	cmd |= MAC_CMDCFG_PAD_EN;	/* Padding Removal on Receive */
+	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
+	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
 	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
 	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
 					 * with CRC errors
@@ -882,7 +898,16 @@ static int init_mac(struct altera_tse_private *priv)
 	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
 	cmd &= ~MAC_CMDCFG_TX_ENA;
 	cmd &= ~MAC_CMDCFG_RX_ENA;
-	iowrite32(cmd, &mac->command_config);
+
+	/* Default speed and duplex setting, full/100 */
+	cmd &= ~MAC_CMDCFG_HD_ENA;
+	cmd &= ~MAC_CMDCFG_ETH_SPEED;
+	cmd &= ~MAC_CMDCFG_ENA_10;
+
+	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
+
+	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+		tse_csroffs(pause_quanta));
 
 	if (netif_msg_hw(priv))
 		dev_dbg(priv->device,
@@ -895,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
  */
 static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
-	u32 value = ioread32(&mac->command_config);
+	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 
 	if (enable)
 		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
 	else
 		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 
-	iowrite32(value, &mac->command_config);
+	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 }
 
 /* Change the MTU
@@ -933,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
 static void altera_tse_set_mcfilter(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	int i;
 	struct netdev_hw_addr *ha;
 
 	/* clear the hash filter */
 	for (i = 0; i < 64; i++)
-		iowrite32(0, &(mac->hash_table[i]));
+		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 
 	netdev_for_each_mc_addr(ha, dev) {
 		unsigned int hash = 0;
@@ -955,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 
 			hash = (hash << 1) | xor_bit;
 		}
-		iowrite32(1, &(mac->hash_table[hash]));
+		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
 	}
 }
 
@@ -963,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 static void altera_tse_set_mcfilterall(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	int i;
 
 	/* set the hash filter */
 	for (i = 0; i < 64; i++)
-		iowrite32(1, &(mac->hash_table[i]));
+		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 }
 
 /* Set or clear the multicast filter for this adaptor
@@ -976,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
 static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 
 	spin_lock(&priv->mac_cfg_lock);
 
 	if (dev->flags & IFF_PROMISC)
-		tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+			    MAC_CMDCFG_PROMIS_EN);
 
 	if (dev->flags & IFF_ALLMULTI)
 		altera_tse_set_mcfilterall(dev);
@@ -996,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 static void tse_set_rx_mode(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 
 	spin_lock(&priv->mac_cfg_lock);
 
 	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
 	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
-		tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+			    MAC_CMDCFG_PROMIS_EN);
 	else
-		tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+			      MAC_CMDCFG_PROMIS_EN);
 
 	spin_unlock(&priv->mac_cfg_lock);
 }
@@ -1085,17 +1108,19 @@ static int tse_open(struct net_device *dev)
 
 	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
-	/* Start MAC Rx/Tx */
-	spin_lock(&priv->mac_cfg_lock);
-	tse_set_mac(priv, true);
-	spin_unlock(&priv->mac_cfg_lock);
-
 	if (priv->phydev)
 		phy_start(priv->phydev);
 
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);
 
+	priv->dmaops->start_rxdma(priv);
+
+	/* Start MAC Rx/Tx */
+	spin_lock(&priv->mac_cfg_lock);
+	tse_set_mac(priv, true);
+	spin_unlock(&priv->mac_cfg_lock);
+
 	return 0;
 
 tx_request_irq_error:
@@ -1167,7 +1192,6 @@ static struct net_device_ops altera_tse_netdev_ops = {
 	.ndo_validate_addr = eth_validate_addr,
 };
 
-
 static int request_and_map(struct platform_device *pdev, const char *name,
 			   struct resource **res, void __iomem **ptr)
 {
@@ -1235,7 +1259,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	/* Get the mapped address to the SGDMA descriptor memory */
 	ret = request_and_map(pdev, "s1", &dma_res, &descmap);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;
 
 	/* Start of that memory is for transmit descriptors */
 	priv->tx_dma_desc = descmap;
@@ -1254,24 +1278,24 @@ static int altera_tse_probe(struct platform_device *pdev)
 		if (upper_32_bits(priv->rxdescmem_busaddr)) {
 			dev_dbg(priv->device,
 				"SGDMA bus addresses greater than 32-bits\n");
-			goto out_free;
+			goto err_free_netdev;
 		}
 		if (upper_32_bits(priv->txdescmem_busaddr)) {
 			dev_dbg(priv->device,
 				"SGDMA bus addresses greater than 32-bits\n");
-			goto out_free;
+			goto err_free_netdev;
 		}
 	} else if (priv->dmaops &&
 		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
 		ret = request_and_map(pdev, "rx_resp", &dma_res,
 				      &priv->rx_dma_resp);
 		if (ret)
-			goto out_free;
+			goto err_free_netdev;
 
 		ret = request_and_map(pdev, "tx_desc", &dma_res,
 				      &priv->tx_dma_desc);
 		if (ret)
-			goto out_free;
+			goto err_free_netdev;
 
 		priv->txdescmem = resource_size(dma_res);
 		priv->txdescmem_busaddr = dma_res->start;
@@ -1279,13 +1303,13 @@ static int altera_tse_probe(struct platform_device *pdev)
 		ret = request_and_map(pdev, "rx_desc", &dma_res,
 				      &priv->rx_dma_desc);
 		if (ret)
-			goto out_free;
+			goto err_free_netdev;
 
 		priv->rxdescmem = resource_size(dma_res);
 		priv->rxdescmem_busaddr = dma_res->start;
 
 	} else {
-		goto out_free;
+		goto err_free_netdev;
 	}
 
@@ -1294,26 +1318,26 @@ static int altera_tse_probe(struct platform_device *pdev)
 	else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
 		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
 	else
-		goto out_free;
+		goto err_free_netdev;
 
 	/* MAC address space */
 	ret = request_and_map(pdev, "control_port", &control_port,
 			      (void __iomem **)&priv->mac_dev);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;
 
 	/* xSGDMA Rx Dispatcher address space */
 	ret = request_and_map(pdev, "rx_csr", &dma_res,
 			      &priv->rx_dma_csr);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;
 
 
 	/* xSGDMA Tx Dispatcher address space */
 	ret = request_and_map(pdev, "tx_csr", &dma_res,
 			      &priv->tx_dma_csr);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;
 
 
 	/* Rx IRQ */
@@ -1321,7 +1345,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	if (priv->rx_irq == -ENXIO) {
 		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}
 
 	/* Tx IRQ */
@@ -1329,7 +1353,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	if (priv->tx_irq == -ENXIO) {
 		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}
 
 	/* get FIFO depths from device tree */
@@ -1337,14 +1361,14 @@ static int altera_tse_probe(struct platform_device *pdev)
 				 &priv->rx_fifo_depth)) {
 		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}
 
 	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
 				 &priv->rx_fifo_depth)) {
 		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}
 
 	/* get hash filter settings for this instance */
@@ -1352,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
 		of_property_read_bool(pdev->dev.of_node,
 				      "altr,has-hash-multicast-filter");
 
+	/* Set hash filter to not set for now until the
+	 * multicast filter receive issue is debugged
+	 */
+	priv->hash_filter = 0;
+
 	/* get supplemental address settings for this instance */
 	priv->added_unicast =
 		of_property_read_bool(pdev->dev.of_node,
@@ -1393,7 +1422,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	      ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
 		dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
 			priv->phy_addr);
-		goto out_free;
+		goto err_free_netdev;
 	}
 
 	/* Create/attach to MDIO bus */
@@ -1401,7 +1430,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 				     atomic_add_return(1, &instance_count));
 
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;
 
 	/* initialize netdev */
 	ether_setup(ndev);
@@ -1438,7 +1467,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	ret = register_netdev(ndev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register TSE net device\n");
-		goto out_free_mdio;
+		goto err_register_netdev;
 	}
 
 	platform_set_drvdata(pdev, ndev);
@@ -1455,13 +1484,16 @@ static int altera_tse_probe(struct platform_device *pdev)
 	ret = init_phy(ndev);
 	if (ret != 0) {
 		netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
-		goto out_free_mdio;
+		goto err_init_phy;
 	}
 	return 0;
 
-out_free_mdio:
+err_init_phy:
+	unregister_netdev(ndev);
+err_register_netdev:
+	netif_napi_del(&priv->napi);
 	altera_tse_mdio_destroy(ndev);
-out_free:
+err_free_netdev:
 	free_netdev(ndev);
 	return ret;
 }
@@ -1480,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
 	return 0;
 }
 
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
 	.altera_dtype = ALTERA_DTYPE_SGDMA,
 	.dmamask = 32,
 	.reset_dma = sgdma_reset,
@@ -1496,9 +1528,10 @@ struct altera_dmaops altera_dtype_sgdma = {
 	.get_rx_status = sgdma_rx_status,
 	.init_dma = sgdma_initialize,
 	.uninit_dma = sgdma_uninitialize,
+	.start_rxdma = sgdma_start_rxdma,
 };
 
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
 	.altera_dtype = ALTERA_DTYPE_MSGDMA,
 	.dmamask = 64,
 	.reset_dma = msgdma_reset,
@@ -1514,6 +1547,7 @@ struct altera_dmaops altera_dtype_msgdma = {
 	.get_rx_status = msgdma_rx_status,
 	.init_dma = msgdma_initialize,
 	.uninit_dma = msgdma_uninitialize,
+	.start_rxdma = msgdma_start_rxdma,
 };
 
 static struct of_device_id altera_tse_ids[] = {
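[Editor's note] The tse_open() reordering is the behavioral half of the new start_rxdma hook: receive descriptors are handed to the DMA engine before the MAC receiver is enabled, so the first frame that arrives has a descriptor to land in. A compressed sketch of the bring-up tail under that ordering (the body is illustrative only; tse_set_mac() and the dmaops table are the driver's own):

    static void tse_open_tail(struct altera_tse_private *priv,
                              struct net_device *dev)
    {
            napi_enable(&priv->napi);
            netif_start_queue(dev);

            /* prime and start the RX DMA engine first ... */
            priv->dmaops->start_rxdma(priv);

            /* ... and only then open the MAC Rx/Tx paths */
            spin_lock(&priv->mac_cfg_lock);
            tse_set_mac(priv, true);
            spin_unlock(&priv->mac_cfg_lock);
    }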
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
index 70fa13f486b2..d7eeb1713ad2 100644
--- a/drivers/net/ethernet/altera/altera_utils.c
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -17,28 +17,28 @@
 #include "altera_tse.h"
 #include "altera_utils.h"
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	value |= bit_mask;
-	iowrite32(value, ioaddr);
+	csrwr32(value, ioaddr, offs);
 }
 
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	value &= ~bit_mask;
-	iowrite32(value, ioaddr);
+	csrwr32(value, ioaddr, offs);
 }
 
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	return (value & bit_mask) ? 1 : 0;
 }
 
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	return (value & bit_mask) ? 0 : 1;
 }
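[Editor's note] With the extra offs argument, the bit helpers compose directly with the tse_csroffs()/sgdma_csroffs() macros, so read-modify-write sequences stay one-liners at the call site. For example, the software-reset poll in reset_mac() above reduces to something like this (illustrative sketch, not part of the patch):

    static int tse_sw_reset_done(struct altera_tse_private *priv)
    {
            return tse_bit_is_clear(priv->mac_dev,
                                    tse_csroffs(command_config),
                                    MAC_CMDCFG_SW_RESET);
    }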
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index ce1db36d3583..baf100ccf587 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -19,9 +19,9 @@
 #ifndef __ALTERA_UTILS_H__
 #define __ALTERA_UTILS_H__
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
 
 #endif /* __ALTERA_UTILS_H__*/
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 928fac6dd10a..53f85bf71526 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -11,6 +11,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
+#include <linux/clk.h>
 
 /* STATUS and ENABLE Register bit masks */
 #define TXINT_MASK	(1<<0)	/* Transmit interrupt */
@@ -131,6 +132,7 @@ struct arc_emac_priv {
 	struct mii_bus *bus;
 
 	void __iomem *regs;
+	struct clk *clk;
 
 	struct napi_struct napi;
 	struct net_device_stats stats;
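[Editor's note] The new clk handle lets the EMAC take its reference clock from the clock tree when the device tree provides one, while older bindings that only carry a fixed "clock-frequency" property keep working. A sketch of the acquisition logic the probe change below implements (error handling condensed; of_clk_get(), clk_prepare_enable() and clk_get_rate() are the standard Linux clk API):

    static int arc_emac_get_clock(struct platform_device *pdev,
                                  struct arc_emac_priv *priv,
                                  u32 *clock_frequency)
    {
            priv->clk = of_clk_get(pdev->dev.of_node, 0);
            if (IS_ERR(priv->clk))
                    /* no clock node: fall back to the legacy property */
                    return of_property_read_u32(pdev->dev.of_node,
                                                "clock-frequency",
                                                clock_frequency);

            if (clk_prepare_enable(priv->clk))
                    return -EINVAL;

            *clock_frequency = clk_get_rate(priv->clk);
            return 0;
    }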
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index eeecc29cf5b7..d647a7d115ac 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -574,6 +574,18 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
+static void arc_emac_set_address_internal(struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+	unsigned int addr_low, addr_hi;
+
+	addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
+	addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
+
+	arc_reg_set(priv, R_ADDRL, addr_low);
+	arc_reg_set(priv, R_ADDRH, addr_hi);
+}
+
 /**
  * arc_emac_set_address - Set the MAC address for this device.
  * @ndev:	Pointer to net_device structure.
@@ -587,9 +599,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
  */
 static int arc_emac_set_address(struct net_device *ndev, void *p)
 {
-	struct arc_emac_priv *priv = netdev_priv(ndev);
 	struct sockaddr *addr = p;
-	unsigned int addr_low, addr_hi;
 
 	if (netif_running(ndev))
 		return -EBUSY;
@@ -599,11 +609,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
 
 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 
-	addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
-	addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
-
-	arc_reg_set(priv, R_ADDRL, addr_low);
-	arc_reg_set(priv, R_ADDRH, addr_hi);
+	arc_emac_set_address_internal(ndev);
 
 	return 0;
 }
@@ -643,13 +649,6 @@ static int arc_emac_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	/* Get CPU clock frequency from device tree */
-	if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
-				 &clock_frequency)) {
-		dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
-		return -EINVAL;
-	}
-
 	/* Get IRQ from device tree */
 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
 	if (!irq) {
@@ -677,17 +676,36 @@ static int arc_emac_probe(struct platform_device *pdev)
677 priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs); 676 priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
678 if (IS_ERR(priv->regs)) { 677 if (IS_ERR(priv->regs)) {
679 err = PTR_ERR(priv->regs); 678 err = PTR_ERR(priv->regs);
680 goto out; 679 goto out_netdev;
681 } 680 }
682 dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs); 681 dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);
683 682
683 priv->clk = of_clk_get(pdev->dev.of_node, 0);
684 if (IS_ERR(priv->clk)) {
685 /* Get CPU clock frequency from device tree */
686 if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
687 &clock_frequency)) {
688 dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
689 err = -EINVAL;
690 goto out_netdev;
691 }
692 } else {
693 err = clk_prepare_enable(priv->clk);
694 if (err) {
695 dev_err(&pdev->dev, "failed to enable clock\n");
696 goto out_clkget;
697 }
698
699 clock_frequency = clk_get_rate(priv->clk);
700 }
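/* Note: with this change the "clock-frequency" property is only a
 * fallback for device trees that do not describe a clock; when a
 * clock node is present, the rate comes from the clock framework.
 */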
701
684 id = arc_reg_get(priv, R_ID); 702 id = arc_reg_get(priv, R_ID);
685 703
686 /* Check for EMAC revision 5 or 7, magic number */ 704 /* Check for EMAC revision 5 or 7, magic number */
687 if (!(id == 0x0005fd02 || id == 0x0007fd02)) { 705 if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
688 dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id); 706 dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
689 err = -ENODEV; 707 err = -ENODEV;
690 goto out; 708 goto out_clken;
691 } 709 }
692 dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id); 710 dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);
693 711
@@ -702,7 +720,7 @@ static int arc_emac_probe(struct platform_device *pdev)
702 ndev->name, ndev); 720 ndev->name, ndev);
703 if (err) { 721 if (err) {
704 dev_err(&pdev->dev, "could not allocate IRQ\n"); 722 dev_err(&pdev->dev, "could not allocate IRQ\n");
705 goto out; 723 goto out_clken;
706 } 724 }
707 725
708 /* Get MAC address from device tree */ 726 /* Get MAC address from device tree */
@@ -713,6 +731,7 @@ static int arc_emac_probe(struct platform_device *pdev)
713 else 731 else
714 eth_hw_addr_random(ndev); 732 eth_hw_addr_random(ndev);
715 733
734 arc_emac_set_address_internal(ndev);
716 dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr); 735 dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
717 736
718 /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ 737 /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
@@ -722,7 +741,7 @@ static int arc_emac_probe(struct platform_device *pdev)
722 if (!priv->rxbd) { 741 if (!priv->rxbd) {
723 dev_err(&pdev->dev, "failed to allocate data buffers\n"); 742 dev_err(&pdev->dev, "failed to allocate data buffers\n");
724 err = -ENOMEM; 743 err = -ENOMEM;
725 goto out; 744 goto out_clken;
726 } 745 }
727 746
728 priv->txbd = priv->rxbd + RX_BD_NUM; 747 priv->txbd = priv->rxbd + RX_BD_NUM;
@@ -734,7 +753,7 @@ static int arc_emac_probe(struct platform_device *pdev)
734 err = arc_mdio_probe(pdev, priv); 753 err = arc_mdio_probe(pdev, priv);
735 if (err) { 754 if (err) {
736 dev_err(&pdev->dev, "failed to probe MII bus\n"); 755 dev_err(&pdev->dev, "failed to probe MII bus\n");
737 goto out; 756 goto out_clken;
738 } 757 }
739 758
740 priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, 759 priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
@@ -742,7 +761,7 @@ static int arc_emac_probe(struct platform_device *pdev)
742 if (!priv->phy_dev) { 761 if (!priv->phy_dev) {
743 dev_err(&pdev->dev, "of_phy_connect() failed\n"); 762 dev_err(&pdev->dev, "of_phy_connect() failed\n");
744 err = -ENODEV; 763 err = -ENODEV;
745 goto out; 764 goto out_mdio;
746 } 765 }
747 766
748 dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n", 767 dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
@@ -752,14 +771,25 @@ static int arc_emac_probe(struct platform_device *pdev)
752 771
753 err = register_netdev(ndev); 772 err = register_netdev(ndev);
754 if (err) { 773 if (err) {
755 netif_napi_del(&priv->napi);
756 dev_err(&pdev->dev, "failed to register network device\n"); 774 dev_err(&pdev->dev, "failed to register network device\n");
757 goto out; 775 goto out_netif_api;
758 } 776 }
759 777
760 return 0; 778 return 0;
761 779
762out: 780out_netif_api:
781 netif_napi_del(&priv->napi);
782 phy_disconnect(priv->phy_dev);
783 priv->phy_dev = NULL;
784out_mdio:
785 arc_mdio_remove(priv);
786out_clken:
787 if (!IS_ERR(priv->clk))
788 clk_disable_unprepare(priv->clk);
789out_clkget:
790 if (!IS_ERR(priv->clk))
791 clk_put(priv->clk);
792out_netdev:
763 free_netdev(ndev); 793 free_netdev(ndev);
764 return err; 794 return err;
765} 795}
@@ -774,6 +804,12 @@ static int arc_emac_remove(struct platform_device *pdev)
774 arc_mdio_remove(priv); 804 arc_mdio_remove(priv);
775 unregister_netdev(ndev); 805 unregister_netdev(ndev);
776 netif_napi_del(&priv->napi); 806 netif_napi_del(&priv->napi);
807
808 if (!IS_ERR(priv->clk)) {
809 clk_disable_unprepare(priv->clk);
810 clk_put(priv->clk);
811 }
812
777 free_netdev(ndev); 813 free_netdev(ndev);
778 814
779 return 0; 815 return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a8efb18e42fa..0ab83708b6a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8627,6 +8627,7 @@ bnx2_remove_one(struct pci_dev *pdev)
8627 pci_disable_device(pdev); 8627 pci_disable_device(pdev);
8628} 8628}
8629 8629
8630#ifdef CONFIG_PM_SLEEP
8630static int 8631static int
8631bnx2_suspend(struct device *device) 8632bnx2_suspend(struct device *device)
8632{ 8633{
@@ -8665,7 +8666,6 @@ bnx2_resume(struct device *device)
8665 return 0; 8666 return 0;
8666} 8667}
8667 8668
8668#ifdef CONFIG_PM_SLEEP
8669static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume); 8669static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8670#define BNX2_PM_OPS (&bnx2_pm_ops) 8670#define BNX2_PM_OPS (&bnx2_pm_ops)
8671 8671
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a78edaccceee..3b0d43154e67 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10051#define BCM_5710_UNDI_FW_MF_MAJOR (0x07) 10051#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10052#define BCM_5710_UNDI_FW_MF_MINOR (0x08) 10052#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10053#define BCM_5710_UNDI_FW_MF_VERS (0x05) 10053#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10054#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4)) 10054#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4)) 10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) 10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
10057{ 10057{
10058 u8 major, minor, version; 10058 u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10352 /* Reset should be performed after BRB is emptied */ 10352 /* Reset should be performed after BRB is emptied */
10353 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 10353 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10354 u32 timer_count = 1000; 10354 u32 timer_count = 1000;
10355 bool need_write = true;
10355 10356
10356 /* Close the MAC Rx to prevent BRB from filling up */ 10357 /* Close the MAC Rx to prevent BRB from filling up */
10357 bnx2x_prev_unload_close_mac(bp, &mac_vals); 10358 bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10398 * cleaning methods - might be redundant but harmless. 10399 * cleaning methods - might be redundant but harmless.
10399 */ 10400 */
10400 if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { 10401 if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
10401 bnx2x_prev_unload_undi_mf(bp); 10402 if (need_write) {
10403 bnx2x_prev_unload_undi_mf(bp);
10404 need_write = false;
10405 }
10402 } else if (prev_undi) { 10406 } else if (prev_undi) {
10403 /* If UNDI resides in memory, 10407 /* If UNDI resides in memory,
10404 * manually increment it 10408 * manually increment it
@@ -13233,6 +13237,8 @@ static void __bnx2x_remove(struct pci_dev *pdev,
13233 iounmap(bp->doorbells); 13237 iounmap(bp->doorbells);
13234 13238
13235 bnx2x_release_firmware(bp); 13239 bnx2x_release_firmware(bp);
13240 } else {
13241 bnx2x_vf_pci_dealloc(bp);
13236 } 13242 }
13237 bnx2x_free_mem_bp(bp); 13243 bnx2x_free_mem_bp(bp);
13238 13244
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5c523b32db70..b8078d50261b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -427,7 +427,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && 427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= 428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
429 vf_vlan_rules_cnt(vf))) { 429 vf_vlan_rules_cnt(vf))) {
430 BNX2X_ERR("No credits for vlan\n"); 430 BNX2X_ERR("No credits for vlan [%d >= %d]\n",
431 atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
432 vf_vlan_rules_cnt(vf));
431 return -ENOMEM; 433 return -ENOMEM;
432 } 434 }
433 435
@@ -610,6 +612,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
610 } 612 }
611 613
612 /* add new mcasts */ 614 /* add new mcasts */
615 mcast.mcast_list_len = mc_num;
613 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); 616 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
614 if (rc) 617 if (rc)
615 BNX2X_ERR("Faled to add multicasts\n"); 618 BNX2X_ERR("Faled to add multicasts\n");
@@ -837,6 +840,29 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
837 return 0; 840 return 0;
838} 841}
839 842
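/* Adjust a VF's vlan filter credits to 'new', returning surplus
 * credits to (or drawing the shortfall from) the PF's vlans_pool.
 */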
843static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
844 struct bnx2x_virtf *vf,
845 int new)
846{
847 int num = vf_vlan_rules_cnt(vf);
848 int diff = new - num;
849 bool rc = true;
850
851 DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
852 vf->abs_vfid, new, num);
853
854 if (diff > 0)
855 rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
856 else if (diff < 0)
857 rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
858
859 if (rc)
860 vf_vlan_rules_cnt(vf) = new;
861 else
862 DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
863 vf->abs_vfid);
864}
865
840/* must be called after the number of PF queues and the number of VFs are 866/* must be called after the number of PF queues and the number of VFs are
841 * both known 867 * both known
842 */ 868 */
@@ -854,9 +880,11 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
854 resc->num_mac_filters = 1; 880 resc->num_mac_filters = 1;
855 881
856 /* divvy up vlan rules */ 882 /* divvy up vlan rules */
883 bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
857 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); 884 vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
858 vlan_count = 1 << ilog2(vlan_count); 885 vlan_count = 1 << ilog2(vlan_count);
859 resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp); 886 bnx2x_iov_re_set_vlan_filters(bp, vf,
887 vlan_count / BNX2X_NR_VIRTFN(bp));
860 888
861 /* no real limitation */ 889 /* no real limitation */
862 resc->num_mc_filters = 0; 890 resc->num_mc_filters = 0;
@@ -1478,10 +1506,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
1478 bnx2x_iov_static_resc(bp, vf); 1506 bnx2x_iov_static_resc(bp, vf);
1479 1507
1480 /* queues are initialized during VF-ACQUIRE */ 1508 /* queues are initialized during VF-ACQUIRE */
1481
1482 /* reserve the vf vlan credit */
1483 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
1484
1485 vf->filter_state = 0; 1509 vf->filter_state = 0;
1486 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 1510 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1487 1511
@@ -1912,11 +1936,12 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
1912 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1936 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1913 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1937 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1914 1938
1939 /* Save a vlan filter for the Hypervisor */
1915 return ((req_resc->num_rxqs <= rxq_cnt) && 1940 return ((req_resc->num_rxqs <= rxq_cnt) &&
1916 (req_resc->num_txqs <= txq_cnt) && 1941 (req_resc->num_txqs <= txq_cnt) &&
1917 (req_resc->num_sbs <= vf_sb_count(vf)) && 1942 (req_resc->num_sbs <= vf_sb_count(vf)) &&
1918 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 1943 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
1919 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); 1944 (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
1920} 1945}
1921 1946
1922/* CORE VF API */ 1947/* CORE VF API */
@@ -1972,14 +1997,14 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1972 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 1997 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
1973 if (resc->num_mac_filters) 1998 if (resc->num_mac_filters)
1974 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 1999 vf_mac_rules_cnt(vf) = resc->num_mac_filters;
1975 if (resc->num_vlan_filters) 2000 /* Add an additional vlan filter credit for the hypervisor */
1976 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; 2001 bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
1977 2002
1978 DP(BNX2X_MSG_IOV, 2003 DP(BNX2X_MSG_IOV,
1979 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2004 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
1980 vf_sb_count(vf), vf_rxq_count(vf), 2005 vf_sb_count(vf), vf_rxq_count(vf),
1981 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2006 vf_txq_count(vf), vf_mac_rules_cnt(vf),
1982 vf_vlan_rules_cnt(vf)); 2007 vf_vlan_rules_visible_cnt(vf));
1983 2008
1984 /* Initialize the queues */ 2009 /* Initialize the queues */
1985 if (!vf->vfqs) { 2010 if (!vf->vfqs) {
@@ -2670,7 +2695,7 @@ out:
2670 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2695 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2671 } 2696 }
2672 2697
2673 return 0; 2698 return rc;
2674} 2699}
2675 2700
2676int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 2701int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
@@ -2896,6 +2921,14 @@ void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
2896 return bp->regview + PXP_VF_ADDR_DB_START; 2921 return bp->regview + PXP_VF_ADDR_DB_START;
2897} 2922}
2898 2923
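/* Release the VF->PF mailbox and PF->VF bulletin DMA areas; used by
 * the allocation error path below and by __bnx2x_remove() for VFs.
 */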
2924void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
2925{
2926 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
2927 sizeof(struct bnx2x_vf_mbx_msg));
 2928 BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
2929 sizeof(union pf_vf_bulletin));
2930}
2931
2899int bnx2x_vf_pci_alloc(struct bnx2x *bp) 2932int bnx2x_vf_pci_alloc(struct bnx2x *bp)
2900{ 2933{
2901 mutex_init(&bp->vf2pf_mutex); 2934 mutex_init(&bp->vf2pf_mutex);
@@ -2915,10 +2948,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
2915 return 0; 2948 return 0;
2916 2949
2917alloc_mem_err: 2950alloc_mem_err:
2918 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 2951 bnx2x_vf_pci_dealloc(bp);
2919 sizeof(struct bnx2x_vf_mbx_msg));
2920 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
2921 sizeof(union pf_vf_bulletin));
2922 return -ENOMEM; 2952 return -ENOMEM;
2923} 2953}
2924 2954
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8bf764570eef..6929adba52f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -159,6 +159,8 @@ struct bnx2x_virtf {
159#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) 159#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
160#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) 160#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
161#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) 161#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
162 /* Hide a single vlan filter credit for the hypervisor */
163#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1)
162 164
163 u8 sb_count; /* actual number of SBs */ 165 u8 sb_count; /* actual number of SBs */
164 u8 igu_base_id; /* base igu status block id */ 166 u8 igu_base_id; /* base igu status block id */
@@ -502,6 +504,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
502enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 504enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
503void bnx2x_timer_sriov(struct bnx2x *bp); 505void bnx2x_timer_sriov(struct bnx2x *bp);
504void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); 506void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
507void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
505int bnx2x_vf_pci_alloc(struct bnx2x *bp); 508int bnx2x_vf_pci_alloc(struct bnx2x *bp);
506int bnx2x_enable_sriov(struct bnx2x *bp); 509int bnx2x_enable_sriov(struct bnx2x *bp);
507void bnx2x_disable_sriov(struct bnx2x *bp); 510void bnx2x_disable_sriov(struct bnx2x *bp);
@@ -568,6 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
568 return NULL; 571 return NULL;
569} 572}
570 573
 574static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
571static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } 575static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
572static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} 576static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
573static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } 577static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0622884596b2..784c7155b98a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
747out: 747out:
748 bnx2x_vfpf_finalize(bp, &req->first_tlv); 748 bnx2x_vfpf_finalize(bp, &req->first_tlv);
749 749
750 return 0; 750 return rc;
751} 751}
752 752
753/* request pf to config rss table for vf queues*/ 753/* request pf to config rss table for vf queues*/
@@ -1163,7 +1163,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
1163 bnx2x_vf_max_queue_cnt(bp, vf); 1163 bnx2x_vf_max_queue_cnt(bp, vf);
1164 resc->num_sbs = vf_sb_count(vf); 1164 resc->num_sbs = vf_sb_count(vf);
1165 resc->num_mac_filters = vf_mac_rules_cnt(vf); 1165 resc->num_mac_filters = vf_mac_rules_cnt(vf);
1166 resc->num_vlan_filters = vf_vlan_rules_cnt(vf); 1166 resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
1167 resc->num_mc_filters = 0; 1167 resc->num_mc_filters = 0;
1168 1168
1169 if (status == PFVF_STATUS_SUCCESS) { 1169 if (status == PFVF_STATUS_SUCCESS) {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b9f7022f4e81..e5d95c5ce1ad 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12286,7 +12286,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
12286 if (tg3_flag(tp, MAX_RXPEND_64) && 12286 if (tg3_flag(tp, MAX_RXPEND_64) &&
12287 tp->rx_pending > 63) 12287 tp->rx_pending > 63)
12288 tp->rx_pending = 63; 12288 tp->rx_pending = 63;
12289 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12289
12290 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12291 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12290 12292
12291 for (i = 0; i < tp->irq_max; i++) 12293 for (i = 0; i < tp->irq_max; i++)
12292 tp->napi[i].tx_pending = ering->tx_pending; 12294 tp->napi[i].tx_pending = ering->tx_pending;
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 751d5c7b312d..9e089d24466e 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -4,7 +4,7 @@
4 4
5config NET_CADENCE 5config NET_CADENCE
6 bool "Cadence devices" 6 bool "Cadence devices"
7 depends on HAS_IOMEM 7 depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
8 default y 8 default y
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y. 10 If you have a network (Ethernet) card belonging to this class, say Y.
@@ -22,7 +22,7 @@ if NET_CADENCE
22 22
23config ARM_AT91_ETHER 23config ARM_AT91_ETHER
24 tristate "AT91RM9200 Ethernet support" 24 tristate "AT91RM9200 Ethernet support"
25 depends on HAS_DMA 25 depends on HAS_DMA && (ARCH_AT91RM9200 || COMPILE_TEST)
26 select MACB 26 select MACB
27 ---help--- 27 ---help---
28 If you wish to compile a kernel for the AT91RM9200 and enable 28 If you wish to compile a kernel for the AT91RM9200 and enable
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
30 30
31config MACB 31config MACB
32 tristate "Cadence MACB/GEM support" 32 tristate "Cadence MACB/GEM support"
33 depends on HAS_DMA 33 depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
34 select PHYLIB 34 select PHYLIB
35 ---help--- 35 ---help---
36 The Cadence MACB ethernet interface is found on many Atmel AT32 and 36 The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ca97005e24b4..e9daa072ebb4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -599,25 +599,16 @@ static void gem_rx_refill(struct macb *bp)
599{ 599{
600 unsigned int entry; 600 unsigned int entry;
601 struct sk_buff *skb; 601 struct sk_buff *skb;
602 struct macb_dma_desc *desc;
603 dma_addr_t paddr; 602 dma_addr_t paddr;
604 603
605 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { 604 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
606 u32 addr, ctrl;
607
608 entry = macb_rx_ring_wrap(bp->rx_prepared_head); 605 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
609 desc = &bp->rx_ring[entry];
610 606
611 /* Make hw descriptor updates visible to CPU */ 607 /* Make hw descriptor updates visible to CPU */
612 rmb(); 608 rmb();
613 609
614 addr = desc->addr;
615 ctrl = desc->ctrl;
616 bp->rx_prepared_head++; 610 bp->rx_prepared_head++;
617 611
618 if ((addr & MACB_BIT(RX_USED)))
619 continue;
620
621 if (bp->rx_skbuff[entry] == NULL) { 612 if (bp->rx_skbuff[entry] == NULL) {
622 /* allocate sk_buff for this free entry in ring */ 613 /* allocate sk_buff for this free entry in ring */
623 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 614 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
@@ -698,7 +689,6 @@ static int gem_rx(struct macb *bp, int budget)
698 if (!(addr & MACB_BIT(RX_USED))) 689 if (!(addr & MACB_BIT(RX_USED)))
699 break; 690 break;
700 691
701 desc->addr &= ~MACB_BIT(RX_USED);
702 bp->rx_tail++; 692 bp->rx_tail++;
703 count++; 693 count++;
704 694
@@ -891,16 +881,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
891 if (work_done < budget) { 881 if (work_done < budget) {
892 napi_complete(napi); 882 napi_complete(napi);
893 883
894 /*
895 * We've done what we can to clean the buffers. Make sure we
896 * get notified when new packets arrive.
897 */
898 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
899
900 /* Packets received while interrupts were disabled */ 884 /* Packets received while interrupts were disabled */
901 status = macb_readl(bp, RSR); 885 status = macb_readl(bp, RSR);
902 if (unlikely(status)) 886 if (status) {
887 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
888 macb_writel(bp, ISR, MACB_BIT(RCOMP));
903 napi_reschedule(napi); 889 napi_reschedule(napi);
890 } else {
891 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
892 }
904 } 893 }
905 894
906 /* TODO: Handle errors */ 895 /* TODO: Handle errors */
@@ -951,6 +940,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
951 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 940 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
952 macb_writel(bp, IDR, MACB_TX_INT_FLAGS); 941 macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
953 schedule_work(&bp->tx_error_task); 942 schedule_work(&bp->tx_error_task);
943
944 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
945 macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
946
954 break; 947 break;
955 } 948 }
956 949
@@ -968,6 +961,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
968 bp->hw_stats.gem.rx_overruns++; 961 bp->hw_stats.gem.rx_overruns++;
969 else 962 else
970 bp->hw_stats.macb.rx_overruns++; 963 bp->hw_stats.macb.rx_overruns++;
964
965 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
966 macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
971 } 967 }
972 968
973 if (status & MACB_BIT(HRESP)) { 969 if (status & MACB_BIT(HRESP)) {
@@ -977,6 +973,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
977 * (work queue?) 973 * (work queue?)
978 */ 974 */
979 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 975 netdev_err(dev, "DMA bus error: HRESP not OK\n");
976
977 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
978 macb_writel(bp, ISR, MACB_BIT(HRESP));
980 } 979 }
981 980
982 status = macb_readl(bp, ISR); 981 status = macb_readl(bp, ISR);
@@ -1113,7 +1112,7 @@ static void gem_free_rx_buffers(struct macb *bp)
1113 1112
1114 desc = &bp->rx_ring[i]; 1113 desc = &bp->rx_ring[i];
1115 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 1114 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1116 dma_unmap_single(&bp->pdev->dev, addr, skb->len, 1115 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1117 DMA_FROM_DEVICE); 1116 DMA_FROM_DEVICE);
1118 dev_kfree_skb_any(skb); 1117 dev_kfree_skb_any(skb);
1119 skb = NULL; 1118 skb = NULL;
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index d40c994a4f6a..570222c33410 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,13 +67,13 @@ config CHELSIO_T3
67 will be called cxgb3. 67 will be called cxgb3.
68 68
69config CHELSIO_T4 69config CHELSIO_T4
70 tristate "Chelsio Communications T4 Ethernet support" 70 tristate "Chelsio Communications T4/T5 Ethernet support"
71 depends on PCI 71 depends on PCI
72 select FW_LOADER 72 select FW_LOADER
73 select MDIO 73 select MDIO
74 ---help--- 74 ---help---
75 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 75 This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
 76 adapters. 76 adapters and T5 based 40Gb Ethernet adapters.
77 77
78 For general information about Chelsio and our products, visit 78 For general information about Chelsio and our products, visit
79 our website at <http://www.chelsio.com>. 79 our website at <http://www.chelsio.com>.
@@ -87,11 +87,12 @@ config CHELSIO_T4
87 will be called cxgb4. 87 will be called cxgb4.
88 88
89config CHELSIO_T4VF 89config CHELSIO_T4VF
90 tristate "Chelsio Communications T4 Virtual Function Ethernet support" 90 tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
91 depends on PCI 91 depends on PCI
92 ---help--- 92 ---help---
93 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 93 This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
94 adapters with PCI-E SR-IOV Virtual Functions. 94 adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual
95 Functions.
95 96
96 For general information about Chelsio and our products, visit 97 For general information about Chelsio and our products, visit
97 our website at <http://www.chelsio.com>. 98 our website at <http://www.chelsio.com>.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6fe58913403a..24e16e3301e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5870,6 +5870,8 @@ static void print_port_info(const struct net_device *dev)
5870 spd = " 2.5 GT/s"; 5870 spd = " 2.5 GT/s";
5871 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) 5871 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5872 spd = " 5 GT/s"; 5872 spd = " 5 GT/s";
5873 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
5874 spd = " 8 GT/s";
5873 5875
5874 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) 5876 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5875 bufp += sprintf(bufp, "100/"); 5877 bufp += sprintf(bufp, "100/");
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 81e8402a74b4..8a96572fdde0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -154,7 +154,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
154 req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync)); 154 req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
155 req->l2t_idx = htons(e->idx); 155 req->l2t_idx = htons(e->idx);
156 req->vlan = htons(e->vlan); 156 req->vlan = htons(e->vlan);
157 if (e->neigh) 157 if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
158 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); 158 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
159 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); 159 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
160 160
@@ -394,6 +394,8 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
394 if (e) { 394 if (e) {
395 spin_lock(&e->lock); /* avoid race with t4_l2t_free */ 395 spin_lock(&e->lock); /* avoid race with t4_l2t_free */
396 e->state = L2T_STATE_RESOLVING; 396 e->state = L2T_STATE_RESOLVING;
397 if (neigh->dev->flags & IFF_LOOPBACK)
398 memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
397 memcpy(e->addr, addr, addr_len); 399 memcpy(e->addr, addr, addr_len);
398 e->ifindex = ifidx; 400 e->ifindex = ifidx;
399 e->hash = hash; 401 e->hash = hash;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fb2fe65903c2..bba67681aeaa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -682,7 +682,7 @@ enum {
682 SF_RD_ID = 0x9f, /* read ID */ 682 SF_RD_ID = 0x9f, /* read ID */
683 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 683 SF_ERASE_SECTOR = 0xd8, /* erase sector */
684 684
685 FW_MAX_SIZE = 512 * 1024, 685 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
686}; 686};
687 687
688/** 688/**
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644
index 000000000000..4884205e56ee
--- /dev/null
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -0,0 +1,706 @@
1 /*
2 * drivers/net/ethernet/beckhoff/ec_bhf.c
3 *
4 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
 17/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
 18 * These can be found on Beckhoff CX50xx industrial PCs.
19 */
20
21#if 0
22#define DEBUG
23#endif
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/ip.h>
33#include <linux/skbuff.h>
34#include <linux/hrtimer.h>
35#include <linux/interrupt.h>
36#include <linux/stat.h>
37
38#define TIMER_INTERVAL_NSEC 20000
39
40#define INFO_BLOCK_SIZE 0x10
41#define INFO_BLOCK_TYPE 0x0
42#define INFO_BLOCK_REV 0x2
43#define INFO_BLOCK_BLK_CNT 0x4
44#define INFO_BLOCK_TX_CHAN 0x4
45#define INFO_BLOCK_RX_CHAN 0x5
46#define INFO_BLOCK_OFFSET 0x8
47
48#define EC_MII_OFFSET 0x4
49#define EC_FIFO_OFFSET 0x8
50#define EC_MAC_OFFSET 0xc
51
52#define MAC_FRAME_ERR_CNT 0x0
53#define MAC_RX_ERR_CNT 0x1
54#define MAC_CRC_ERR_CNT 0x2
55#define MAC_LNK_LST_ERR_CNT 0x3
56#define MAC_TX_FRAME_CNT 0x10
57#define MAC_RX_FRAME_CNT 0x14
58#define MAC_TX_FIFO_LVL 0x20
59#define MAC_DROPPED_FRMS 0x28
60#define MAC_CONNECTED_CCAT_FLAG 0x78
61
62#define MII_MAC_ADDR 0x8
63#define MII_MAC_FILT_FLAG 0xe
64#define MII_LINK_STATUS 0xf
65
66#define FIFO_TX_REG 0x0
67#define FIFO_TX_RESET 0x8
68#define FIFO_RX_REG 0x10
69#define FIFO_RX_ADDR_VALID (1u << 31)
70#define FIFO_RX_RESET 0x18
71
72#define DMA_CHAN_OFFSET 0x1000
73#define DMA_CHAN_SIZE 0x8
74
75#define DMA_WINDOW_SIZE_MASK 0xfffffffc
76
 77static const struct pci_device_id ids[] = {
78 { PCI_DEVICE(0x15ec, 0x5000), },
79 { 0, }
80};
81MODULE_DEVICE_TABLE(pci, ids);
82
83struct rx_header {
84#define RXHDR_NEXT_ADDR_MASK 0xffffffu
85#define RXHDR_NEXT_VALID (1u << 31)
86 __le32 next;
87#define RXHDR_NEXT_RECV_FLAG 0x1
88 __le32 recv;
89#define RXHDR_LEN_MASK 0xfffu
90 __le16 len;
91 __le16 port;
92 __le32 reserved;
93 u8 timestamp[8];
94} __packed;
95
96#define PKT_PAYLOAD_SIZE 0x7e8
97struct rx_desc {
98 struct rx_header header;
99 u8 data[PKT_PAYLOAD_SIZE];
100} __packed;
101
102struct tx_header {
103 __le16 len;
104#define TX_HDR_PORT_0 0x1
105#define TX_HDR_PORT_1 0x2
106 u8 port;
107 u8 ts_enable;
108#define TX_HDR_SENT 0x1
109 __le32 sent;
110 u8 timestamp[8];
111} __packed;
112
113struct tx_desc {
114 struct tx_header header;
115 u8 data[PKT_PAYLOAD_SIZE];
116} __packed;
117
118#define FIFO_SIZE 64
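/* Rough sizing: with PKT_PAYLOAD_SIZE of 0x7e8, an rx_desc works out
 * to exactly 2 KiB (24-byte header + 2024-byte payload), so the
 * 64-entry RX ring needs a 128 KiB DMA window.
 */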
119
120static long polling_frequency = TIMER_INTERVAL_NSEC;
121
122struct bhf_dma {
123 u8 *buf;
124 size_t len;
125 dma_addr_t buf_phys;
126
127 u8 *alloc;
128 size_t alloc_len;
129 dma_addr_t alloc_phys;
130};
131
132struct ec_bhf_priv {
133 struct net_device *net_dev;
134
135 struct pci_dev *dev;
136
 137 void __iomem *io;
 138 void __iomem *dma_io;
139
140 struct hrtimer hrtimer;
141
142 int tx_dma_chan;
143 int rx_dma_chan;
 144 void __iomem *ec_io;
 145 void __iomem *fifo_io;
 146 void __iomem *mii_io;
 147 void __iomem *mac_io;
148
149 struct bhf_dma rx_buf;
150 struct rx_desc *rx_descs;
151 int rx_dnext;
152 int rx_dcount;
153
154 struct bhf_dma tx_buf;
155 struct tx_desc *tx_descs;
156 int tx_dcount;
157 int tx_dnext;
158
159 u64 stat_rx_bytes;
160 u64 stat_tx_bytes;
161};
162
163#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
164
165#define ETHERCAT_MASTER_ID 0x14
166
167static void ec_bhf_print_status(struct ec_bhf_priv *priv)
168{
169 struct device *dev = PRIV_TO_DEV(priv);
170
171 dev_dbg(dev, "Frame error counter: %d\n",
172 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
173 dev_dbg(dev, "RX error counter: %d\n",
174 ioread8(priv->mac_io + MAC_RX_ERR_CNT));
175 dev_dbg(dev, "CRC error counter: %d\n",
176 ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
177 dev_dbg(dev, "TX frame counter: %d\n",
178 ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
179 dev_dbg(dev, "RX frame counter: %d\n",
180 ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
181 dev_dbg(dev, "TX fifo level: %d\n",
182 ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
183 dev_dbg(dev, "Dropped frames: %d\n",
184 ioread8(priv->mac_io + MAC_DROPPED_FRMS));
185 dev_dbg(dev, "Connected with CCAT slot: %d\n",
186 ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
187 dev_dbg(dev, "Link status: %d\n",
188 ioread8(priv->mii_io + MII_LINK_STATUS));
189}
190
191static void ec_bhf_reset(struct ec_bhf_priv *priv)
192{
193 iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
194 iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
195 iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
196 iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
197 iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
198 iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
199 iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
200
201 iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
202 iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
203
204 iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
205}
206
207static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
208{
209 u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
210 u32 addr = (u8 *)desc - priv->tx_buf.buf;
211
212 iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
213
214 dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
215}
216
217static int ec_bhf_desc_sent(struct tx_desc *desc)
218{
219 return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
220}
221
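/* Runs from the polling timer: if the xmit path stopped the queue
 * because the next descriptor was still owned by hardware, wake it
 * once that descriptor is flagged as sent. The smp_rmb() pairs with
 * the smp_wmb() in ec_bhf_start_xmit().
 */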
222static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
223{
224 if (unlikely(netif_queue_stopped(priv->net_dev))) {
225 /* Make sure that we perceive changes to tx_dnext. */
226 smp_rmb();
227
228 if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
229 netif_wake_queue(priv->net_dev);
230 }
231}
232
233static int ec_bhf_pkt_received(struct rx_desc *desc)
234{
235 return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
236}
237
238static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
239{
240 iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
241 priv->fifo_io + FIFO_RX_REG);
242}
243
244static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
245{
246 struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
247 struct device *dev = PRIV_TO_DEV(priv);
248
249 while (ec_bhf_pkt_received(desc)) {
250 int pkt_size = (le16_to_cpu(desc->header.len) &
251 RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
252 u8 *data = desc->data;
253 struct sk_buff *skb;
254
255 skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
256 dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
257
258 if (skb) {
259 memcpy(skb_put(skb, pkt_size), data, pkt_size);
260 skb->protocol = eth_type_trans(skb, priv->net_dev);
261 dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
262
263 priv->stat_rx_bytes += pkt_size;
264
265 netif_rx(skb);
266 } else {
267 dev_err_ratelimited(dev,
268 "Couldn't allocate a skb_buff for a packet of size %u\n",
269 pkt_size);
270 }
271
272 desc->header.recv = 0;
273
274 ec_bhf_add_rx_desc(priv, desc);
275
276 priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
277 desc = &priv->rx_descs[priv->rx_dnext];
278 }
279
280}
281
282static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
283{
284 struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
285 hrtimer);
286 ec_bhf_process_rx(priv);
287 ec_bhf_process_tx(priv);
288
289 if (!netif_running(priv->net_dev))
290 return HRTIMER_NORESTART;
291
292 hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
293 return HRTIMER_RESTART;
294}
295
296static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
297{
298 struct device *dev = PRIV_TO_DEV(priv);
299 unsigned block_count, i;
 300 void __iomem *ec_info;
301
302 dev_dbg(dev, "Info block:\n");
303 dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
304 dev_dbg(dev, "Revision of function: %x\n",
305 (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
306
307 block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
308 dev_dbg(dev, "Number of function blocks: %x\n", block_count);
309
310 for (i = 0; i < block_count; i++) {
311 u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
312 INFO_BLOCK_TYPE);
313 if (type == ETHERCAT_MASTER_ID)
314 break;
315 }
316 if (i == block_count) {
317 dev_err(dev, "EtherCAT master with DMA block not found\n");
318 return -ENODEV;
319 }
320 dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
321
322 ec_info = priv->io + i * INFO_BLOCK_SIZE;
323 dev_dbg(dev, "EtherCAT master revision: %d\n",
324 ioread16(ec_info + INFO_BLOCK_REV));
325
326 priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
327 dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
328 priv->tx_dma_chan);
329
330 priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
331 dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
332 priv->rx_dma_chan);
333
334 priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
335 priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
336 priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
337 priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
338
339 dev_dbg(dev,
340 "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
341 priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
342
343 return 0;
344}
345
346static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
347 struct net_device *net_dev)
348{
349 struct ec_bhf_priv *priv = netdev_priv(net_dev);
350 struct tx_desc *desc;
351 unsigned len;
352
353 dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
354
355 desc = &priv->tx_descs[priv->tx_dnext];
356
357 skb_copy_and_csum_dev(skb, desc->data);
358 len = skb->len;
359
360 memset(&desc->header, 0, sizeof(desc->header));
361 desc->header.len = cpu_to_le16(len);
362 desc->header.port = TX_HDR_PORT_0;
363
364 ec_bhf_send_packet(priv, desc);
365
366 priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
367
368 if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
 369 /* Make sure that updates to tx_dnext are perceived
 370 * by the timer routine.
 371 */
372 smp_wmb();
373
374 netif_stop_queue(net_dev);
375
376 dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
377 ec_bhf_print_status(priv);
378 }
379
380 priv->stat_tx_bytes += len;
381
382 dev_kfree_skb(skb);
383
384 return NETDEV_TX_OK;
385}
386
387static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
388 struct bhf_dma *buf,
389 int channel,
390 int size)
391{
392 int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
393 struct device *dev = PRIV_TO_DEV(priv);
394 u32 mask;
395
396 iowrite32(0xffffffff, priv->dma_io + offset);
397
398 mask = ioread32(priv->dma_io + offset);
399 mask &= DMA_WINDOW_SIZE_MASK;
400 dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
401
 402 /* We want a chunk of memory that is:
 403 * - aligned to the window mask we just read back, and
 404 * - at most the window size (~mask + 1) bytes long.
 405 * To guarantee that an aligned region of that size exists,
 406 * allocate twice as much and align within the result.
 407 */
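	/* Worked example (hypothetical values): if the device reports a
	 * mask of 0xffffe000, the window is ~mask + 1 = 8 KiB. We then
	 * allocate 16 KiB; (alloc_phys + len) & mask is an 8 KiB-aligned
	 * address that still leaves len bytes inside the allocation.
	 */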
408 buf->len = min_t(int, ~mask + 1, size);
409 buf->alloc_len = 2 * buf->len;
410
411 dev_dbg(dev, "Allocating %d bytes for channel %d",
412 (int)buf->alloc_len, channel);
413 buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
414 GFP_KERNEL);
415 if (buf->alloc == NULL) {
416 dev_info(dev, "Failed to allocate buffer\n");
417 return -ENOMEM;
418 }
419
420 buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
421 buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
422
423 iowrite32(0, priv->dma_io + offset + 4);
424 iowrite32(buf->buf_phys, priv->dma_io + offset);
425 dev_dbg(dev, "Buffer: %x and read from dev: %x",
426 (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
427
428 return 0;
429}
430
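/* Every TX descriptor starts out flagged TX_HDR_SENT, so the
 * queue-full test in ec_bhf_start_xmit() sees a completely free ring
 * right after open.
 */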
431static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
432{
433 int i = 0;
434
435 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
436 priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
437 priv->tx_dnext = 0;
438
439 for (i = 0; i < priv->tx_dcount; i++)
440 priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
441}
442
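/* Each RX descriptor's header.next holds the byte offset of its
 * successor within the DMA window, OR'd with RXHDR_NEXT_VALID; the
 * last entry chains back to offset 0, so the hardware walks the ring
 * without CPU intervention.
 */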
443static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
444{
445 int i;
446
447 priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
448 priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
449 priv->rx_dnext = 0;
450
451 for (i = 0; i < priv->rx_dcount; i++) {
452 struct rx_desc *desc = &priv->rx_descs[i];
453 u32 next;
454
455 if (i != priv->rx_dcount - 1)
456 next = (u8 *)(desc + 1) - priv->rx_buf.buf;
457 else
458 next = 0;
459 next |= RXHDR_NEXT_VALID;
460 desc->header.next = cpu_to_le32(next);
461 desc->header.recv = 0;
462 ec_bhf_add_rx_desc(priv, desc);
463 }
464}
465
466static int ec_bhf_open(struct net_device *net_dev)
467{
468 struct ec_bhf_priv *priv = netdev_priv(net_dev);
469 struct device *dev = PRIV_TO_DEV(priv);
470 int err = 0;
471
472 dev_info(dev, "Opening device\n");
473
474 ec_bhf_reset(priv);
475
476 err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
477 FIFO_SIZE * sizeof(struct rx_desc));
478 if (err) {
479 dev_err(dev, "Failed to allocate rx buffer\n");
480 goto out;
481 }
482 ec_bhf_setup_rx_descs(priv);
483
484 dev_info(dev, "RX buffer allocated, address: %x\n",
485 (unsigned)priv->rx_buf.buf_phys);
486
487 err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
488 FIFO_SIZE * sizeof(struct tx_desc));
489 if (err) {
490 dev_err(dev, "Failed to allocate tx buffer\n");
491 goto error_rx_free;
492 }
493 dev_dbg(dev, "TX buffer allocated, addres: %x\n",
494 (unsigned)priv->tx_buf.buf_phys);
495
496 iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
497
498 ec_bhf_setup_tx_descs(priv);
499
500 netif_start_queue(net_dev);
501
502 hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
503 priv->hrtimer.function = ec_bhf_timer_fun;
504 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
505 HRTIMER_MODE_REL);
506
507 dev_info(PRIV_TO_DEV(priv), "Device open\n");
508
509 ec_bhf_print_status(priv);
510
511 return 0;
512
513error_rx_free:
514 dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
 515 priv->rx_buf.alloc_phys);
516out:
517 return err;
518}
519
520static int ec_bhf_stop(struct net_device *net_dev)
521{
522 struct ec_bhf_priv *priv = netdev_priv(net_dev);
523 struct device *dev = PRIV_TO_DEV(priv);
524
525 hrtimer_cancel(&priv->hrtimer);
526
527 ec_bhf_reset(priv);
528
529 netif_tx_disable(net_dev);
530
531 dma_free_coherent(dev, priv->tx_buf.alloc_len,
532 priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
533 dma_free_coherent(dev, priv->rx_buf.alloc_len,
534 priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
535
536 return 0;
537}
538
539static struct rtnl_link_stats64 *
540ec_bhf_get_stats(struct net_device *net_dev,
541 struct rtnl_link_stats64 *stats)
542{
543 struct ec_bhf_priv *priv = netdev_priv(net_dev);
544
545 stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
546 ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
547 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
548 stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
549 stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
550 stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
551
552 stats->tx_bytes = priv->stat_tx_bytes;
553 stats->rx_bytes = priv->stat_rx_bytes;
554
555 return stats;
556}
557
558static const struct net_device_ops ec_bhf_netdev_ops = {
559 .ndo_start_xmit = ec_bhf_start_xmit,
560 .ndo_open = ec_bhf_open,
561 .ndo_stop = ec_bhf_stop,
562 .ndo_get_stats64 = ec_bhf_get_stats,
563 .ndo_change_mtu = eth_change_mtu,
564 .ndo_validate_addr = eth_validate_addr,
565 .ndo_set_mac_address = eth_mac_addr
566};
567
568static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
569{
570 struct net_device *net_dev;
571 struct ec_bhf_priv *priv;
 572 void __iomem *dma_io;
 573 void __iomem *io;
574 int err = 0;
575
576 err = pci_enable_device(dev);
577 if (err)
578 return err;
579
580 pci_set_master(dev);
581
582 err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
583 if (err) {
584 dev_err(&dev->dev,
585 "Required dma mask not supported, failed to initialize device\n");
586 err = -EIO;
587 goto err_disable_dev;
588 }
589
590 err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
591 if (err) {
592 dev_err(&dev->dev,
593 "Required dma mask not supported, failed to initialize device\n");
594 goto err_disable_dev;
595 }
596
597 err = pci_request_regions(dev, "ec_bhf");
598 if (err) {
599 dev_err(&dev->dev, "Failed to request pci memory regions\n");
600 goto err_disable_dev;
601 }
602
603 io = pci_iomap(dev, 0, 0);
604 if (!io) {
605 dev_err(&dev->dev, "Failed to map pci card memory bar 0");
606 err = -EIO;
607 goto err_release_regions;
608 }
609
610 dma_io = pci_iomap(dev, 2, 0);
611 if (!dma_io) {
612 dev_err(&dev->dev, "Failed to map pci card memory bar 2");
613 err = -EIO;
614 goto err_unmap;
615 }
616
617 net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
 618 if (!net_dev) {
619 err = -ENOMEM;
620 goto err_unmap_dma_io;
621 }
622
623 pci_set_drvdata(dev, net_dev);
624 SET_NETDEV_DEV(net_dev, &dev->dev);
625
626 net_dev->features = 0;
627 net_dev->flags |= IFF_NOARP;
628
629 net_dev->netdev_ops = &ec_bhf_netdev_ops;
630
631 priv = netdev_priv(net_dev);
632 priv->net_dev = net_dev;
633 priv->io = io;
634 priv->dma_io = dma_io;
635 priv->dev = dev;
636
637 err = ec_bhf_setup_offsets(priv);
638 if (err < 0)
639 goto err_free_net_dev;
640
641 memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
642
643 dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
644 net_dev->dev_addr);
645
646 err = register_netdev(net_dev);
647 if (err < 0)
648 goto err_free_net_dev;
649
650 return 0;
651
652err_free_net_dev:
653 free_netdev(net_dev);
654err_unmap_dma_io:
655 pci_iounmap(dev, dma_io);
656err_unmap:
657 pci_iounmap(dev, io);
658err_release_regions:
659 pci_release_regions(dev);
660err_disable_dev:
661 pci_clear_master(dev);
662 pci_disable_device(dev);
663
664 return err;
665}
666
667static void ec_bhf_remove(struct pci_dev *dev)
668{
669 struct net_device *net_dev = pci_get_drvdata(dev);
670 struct ec_bhf_priv *priv = netdev_priv(net_dev);
671
672 unregister_netdev(net_dev);
673 free_netdev(net_dev);
674
675 pci_iounmap(dev, priv->dma_io);
676 pci_iounmap(dev, priv->io);
677 pci_release_regions(dev);
678 pci_clear_master(dev);
679 pci_disable_device(dev);
680}
681
682static struct pci_driver pci_driver = {
683 .name = "ec_bhf",
684 .id_table = ids,
685 .probe = ec_bhf_probe,
686 .remove = ec_bhf_remove,
687};
688
689static int __init ec_bhf_init(void)
690{
691 return pci_register_driver(&pci_driver);
692}
693
694static void __exit ec_bhf_exit(void)
695{
696 pci_unregister_driver(&pci_driver);
697}
698
699module_init(ec_bhf_init);
700module_exit(ec_bhf_exit);
701
702module_param(polling_frequency, long, S_IRUGO);
 703MODULE_PARM_DESC(polling_frequency, "Polling interval in ns");
704
705MODULE_LICENSE("GPL");
706MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8ccaa2520dc3..97db5a7179df 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -374,6 +374,7 @@ enum vf_state {
374#define BE_FLAGS_NAPI_ENABLED (1 << 9) 374#define BE_FLAGS_NAPI_ENABLED (1 << 9)
375#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11) 375#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
376#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12) 376#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
377#define BE_FLAGS_SETUP_DONE (1 << 13)
377 378
378#define BE_UC_PMAC_COUNT 30 379#define BE_UC_PMAC_COUNT 30
379#define BE_VF_UC_PMAC_COUNT 2 380#define BE_VF_UC_PMAC_COUNT 2
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3e6df47b6973..dc19bc5dec77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2033,11 +2033,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
2033 bool dummy_wrb; 2033 bool dummy_wrb;
2034 int i, pending_txqs; 2034 int i, pending_txqs;
2035 2035
2036 /* Wait for a max of 200ms for all the tx-completions to arrive. */ 2036 /* Stop polling for compls when HW has been silent for 10ms */
2037 do { 2037 do {
2038 pending_txqs = adapter->num_tx_qs; 2038 pending_txqs = adapter->num_tx_qs;
2039 2039
2040 for_all_tx_queues(adapter, txo, i) { 2040 for_all_tx_queues(adapter, txo, i) {
2041 cmpl = 0;
2042 num_wrbs = 0;
2041 txq = &txo->q; 2043 txq = &txo->q;
2042 while ((txcp = be_tx_compl_get(&txo->cq))) { 2044 while ((txcp = be_tx_compl_get(&txo->cq))) {
2043 end_idx = 2045 end_idx =
@@ -2050,14 +2052,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
2050 if (cmpl) { 2052 if (cmpl) {
2051 be_cq_notify(adapter, txo->cq.id, false, cmpl); 2053 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2052 atomic_sub(num_wrbs, &txq->used); 2054 atomic_sub(num_wrbs, &txq->used);
2053 cmpl = 0; 2055 timeo = 0;
2054 num_wrbs = 0;
2055 } 2056 }
2056 if (atomic_read(&txq->used) == 0) 2057 if (atomic_read(&txq->used) == 0)
2057 pending_txqs--; 2058 pending_txqs--;
2058 } 2059 }
2059 2060
2060 if (pending_txqs == 0 || ++timeo > 200) 2061 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
2061 break; 2062 break;
2062 2063
2063 mdelay(1); 2064 mdelay(1);
@@ -2725,6 +2726,12 @@ static int be_close(struct net_device *netdev)
2725 struct be_eq_obj *eqo; 2726 struct be_eq_obj *eqo;
2726 int i; 2727 int i;
2727 2728
2729 /* This protection is needed as be_close() may be called even when the
2730 * adapter is in cleared state (after eeh perm failure)
2731 */
2732 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2733 return 0;
2734
2728 be_roce_dev_close(adapter); 2735 be_roce_dev_close(adapter);
2729 2736
2730 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { 2737 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3055,6 +3062,7 @@ static int be_clear(struct be_adapter *adapter)
3055 be_clear_queues(adapter); 3062 be_clear_queues(adapter);
3056 3063
3057 be_msix_disable(adapter); 3064 be_msix_disable(adapter);
3065 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3058 return 0; 3066 return 0;
3059} 3067}
3060 3068
@@ -3559,6 +3567,7 @@ static int be_setup(struct be_adapter *adapter)
3559 adapter->phy.fc_autoneg = 1; 3567 adapter->phy.fc_autoneg = 1;
3560 3568
3561 be_schedule_worker(adapter); 3569 be_schedule_worker(adapter);
3570 adapter->flags |= BE_FLAGS_SETUP_DONE;
3562 return 0; 3571 return 0;
3563err: 3572err:
3564 be_clear(adapter); 3573 be_clear(adapter);
@@ -4940,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
4940 if (status) 4949 if (status)
4941 goto err; 4950 goto err;
4942 4951
4952 /* On some BE3 FW versions, after a HW reset,
4953 * interrupts will remain disabled for each function.
4954 * So, explicitly enable interrupts
4955 */
4956 be_intr_set(adapter, true);
4957
4943 /* tell fw we're ready to fire cmds */ 4958 /* tell fw we're ready to fire cmds */
4944 status = be_cmd_fw_init(adapter); 4959 status = be_cmd_fw_init(adapter);
4945 if (status) 4960 if (status)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9125d9abf099..e2d42475b006 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -121,6 +121,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
121static irqreturn_t gfar_transmit(int irq, void *dev_id); 121static irqreturn_t gfar_transmit(int irq, void *dev_id);
122static irqreturn_t gfar_interrupt(int irq, void *dev_id); 122static irqreturn_t gfar_interrupt(int irq, void *dev_id);
123static void adjust_link(struct net_device *dev); 123static void adjust_link(struct net_device *dev);
124static noinline void gfar_update_link_state(struct gfar_private *priv);
124static int init_phy(struct net_device *dev); 125static int init_phy(struct net_device *dev);
125static int gfar_probe(struct platform_device *ofdev); 126static int gfar_probe(struct platform_device *ofdev);
126static int gfar_remove(struct platform_device *ofdev); 127static int gfar_remove(struct platform_device *ofdev);
@@ -3076,41 +3077,6 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3076 return IRQ_HANDLED; 3077 return IRQ_HANDLED;
3077} 3078}
3078 3079
3079static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3080{
3081 struct phy_device *phydev = priv->phydev;
3082 u32 val = 0;
3083
3084 if (!phydev->duplex)
3085 return val;
3086
3087 if (!priv->pause_aneg_en) {
3088 if (priv->tx_pause_en)
3089 val |= MACCFG1_TX_FLOW;
3090 if (priv->rx_pause_en)
3091 val |= MACCFG1_RX_FLOW;
3092 } else {
3093 u16 lcl_adv, rmt_adv;
3094 u8 flowctrl;
3095 /* get link partner capabilities */
3096 rmt_adv = 0;
3097 if (phydev->pause)
3098 rmt_adv = LPA_PAUSE_CAP;
3099 if (phydev->asym_pause)
3100 rmt_adv |= LPA_PAUSE_ASYM;
3101
3102 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3103
3104 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3105 if (flowctrl & FLOW_CTRL_TX)
3106 val |= MACCFG1_TX_FLOW;
3107 if (flowctrl & FLOW_CTRL_RX)
3108 val |= MACCFG1_RX_FLOW;
3109 }
3110
3111 return val;
3112}
3113
3114/* Called every time the controller might need to be made 3080/* Called every time the controller might need to be made
3115 * aware of new link state. The PHY code conveys this 3081 * aware of new link state. The PHY code conveys this
3116 * information through variables in the phydev structure, and this 3082 * information through variables in the phydev structure, and this
@@ -3120,83 +3086,12 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
 static void adjust_link(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	struct phy_device *phydev = priv->phydev;
-	int new_state = 0;

-	if (test_bit(GFAR_RESETTING, &priv->state))
-		return;
-
-	if (phydev->link) {
-		u32 tempval1 = gfar_read(&regs->maccfg1);
-		u32 tempval = gfar_read(&regs->maccfg2);
-		u32 ecntrl = gfar_read(&regs->ecntrl);
-
-		/* Now we make sure that we can be in full duplex mode.
-		 * If not, we operate in half-duplex mode.
-		 */
-		if (phydev->duplex != priv->oldduplex) {
-			new_state = 1;
-			if (!(phydev->duplex))
-				tempval &= ~(MACCFG2_FULL_DUPLEX);
-			else
-				tempval |= MACCFG2_FULL_DUPLEX;
-
-			priv->oldduplex = phydev->duplex;
-		}
-
-		if (phydev->speed != priv->oldspeed) {
-			new_state = 1;
-			switch (phydev->speed) {
-			case 1000:
-				tempval =
-				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
-
-				ecntrl &= ~(ECNTRL_R100);
-				break;
-			case 100:
-			case 10:
-				tempval =
-				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
-
-				/* Reduced mode distinguishes
-				 * between 10 and 100
-				 */
-				if (phydev->speed == SPEED_100)
-					ecntrl |= ECNTRL_R100;
-				else
-					ecntrl &= ~(ECNTRL_R100);
-				break;
-			default:
-				netif_warn(priv, link, dev,
-					   "Ack! Speed (%d) is not 10/100/1000!\n",
-					   phydev->speed);
-				break;
-			}
-
-			priv->oldspeed = phydev->speed;
-		}
-
-		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-		tempval1 |= gfar_get_flowctrl_cfg(priv);
-
-		gfar_write(&regs->maccfg1, tempval1);
-		gfar_write(&regs->maccfg2, tempval);
-		gfar_write(&regs->ecntrl, ecntrl);
-
-		if (!priv->oldlink) {
-			new_state = 1;
-			priv->oldlink = 1;
-		}
-	} else if (priv->oldlink) {
-		new_state = 1;
-		priv->oldlink = 0;
-		priv->oldspeed = 0;
-		priv->oldduplex = -1;
-	}
-
-	if (new_state && netif_msg_link(priv))
-		phy_print_status(phydev);
+	if (unlikely(phydev->link != priv->oldlink ||
+		     phydev->duplex != priv->oldduplex ||
+		     phydev->speed != priv->oldspeed))
+		gfar_update_link_state(priv);
 }

 /* Update the hash table based on the current list of multicast
@@ -3442,6 +3337,114 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
 	return IRQ_HANDLED;
 }

+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+	struct phy_device *phydev = priv->phydev;
+	u32 val = 0;
+
+	if (!phydev->duplex)
+		return val;
+
+	if (!priv->pause_aneg_en) {
+		if (priv->tx_pause_en)
+			val |= MACCFG1_TX_FLOW;
+		if (priv->rx_pause_en)
+			val |= MACCFG1_RX_FLOW;
+	} else {
+		u16 lcl_adv, rmt_adv;
+		u8 flowctrl;
+		/* get link partner capabilities */
+		rmt_adv = 0;
+		if (phydev->pause)
+			rmt_adv = LPA_PAUSE_CAP;
+		if (phydev->asym_pause)
+			rmt_adv |= LPA_PAUSE_ASYM;
+
+		lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+
+		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+		if (flowctrl & FLOW_CTRL_TX)
+			val |= MACCFG1_TX_FLOW;
+		if (flowctrl & FLOW_CTRL_RX)
+			val |= MACCFG1_RX_FLOW;
+	}
+
+	return val;
+}
+
+static noinline void gfar_update_link_state(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	struct phy_device *phydev = priv->phydev;
+
+	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
+		return;
+
+	if (phydev->link) {
+		u32 tempval1 = gfar_read(&regs->maccfg1);
+		u32 tempval = gfar_read(&regs->maccfg2);
+		u32 ecntrl = gfar_read(&regs->ecntrl);
+
+		if (phydev->duplex != priv->oldduplex) {
+			if (!(phydev->duplex))
+				tempval &= ~(MACCFG2_FULL_DUPLEX);
+			else
+				tempval |= MACCFG2_FULL_DUPLEX;
+
+			priv->oldduplex = phydev->duplex;
+		}
+
+		if (phydev->speed != priv->oldspeed) {
+			switch (phydev->speed) {
+			case 1000:
+				tempval =
+				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+				ecntrl &= ~(ECNTRL_R100);
+				break;
+			case 100:
+			case 10:
+				tempval =
+				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+
+				/* Reduced mode distinguishes
+				 * between 10 and 100
+				 */
+				if (phydev->speed == SPEED_100)
+					ecntrl |= ECNTRL_R100;
+				else
+					ecntrl &= ~(ECNTRL_R100);
+				break;
+			default:
+				netif_warn(priv, link, priv->ndev,
+					   "Ack! Speed (%d) is not 10/100/1000!\n",
+					   phydev->speed);
+				break;
+			}
+
+			priv->oldspeed = phydev->speed;
+		}
+
+		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+		tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+		gfar_write(&regs->maccfg1, tempval1);
+		gfar_write(&regs->maccfg2, tempval);
+		gfar_write(&regs->ecntrl, ecntrl);
+
+		if (!priv->oldlink)
+			priv->oldlink = 1;
+
+	} else if (priv->oldlink) {
+		priv->oldlink = 0;
+		priv->oldspeed = 0;
+		priv->oldduplex = -1;
+	}
+
+	if (netif_msg_link(priv))
+		phy_print_status(phydev);
+}
+
 static struct of_device_id gfar_match[] =
 {
 	{
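/*
 * Illustrative sketch (not from the driver sources): the gianfar rework keeps
 * adjust_link() tiny -- it compares the PHY's reported state against cached
 * values and only calls the (noinline) heavy reconfiguration helper when
 * something actually changed. The types and fields below are stand-ins.
 */
#include <stdio.h>

struct phy_state { int link, speed, duplex; };
struct priv_state { int oldlink, oldspeed, oldduplex; };

static void update_link_state(struct priv_state *p, const struct phy_state *phy)
{
	/* the expensive MAC register reprogramming would happen here */
	p->oldlink = phy->link;
	p->oldspeed = phy->speed;
	p->oldduplex = phy->duplex;
	printf("reconfigured MAC: link=%d speed=%d duplex=%d\n",
	       phy->link, phy->speed, phy->duplex);
}

static void adjust_link(struct priv_state *p, const struct phy_state *phy)
{
	if (phy->link != p->oldlink || phy->duplex != p->oldduplex ||
	    phy->speed != p->oldspeed)
		update_link_state(p, phy);	/* only on a real change */
}

int main(void)
{
	struct priv_state p = { 0, 0, -1 };
	struct phy_state up = { 1, 1000, 1 };

	adjust_link(&p, &up);	/* triggers reconfiguration */
	adjust_link(&p, &up);	/* no change: returns immediately */
	return 0;
}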
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 891dbee6e6c1..76d70708f864 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -533,6 +533,9 @@ static int gfar_spauseparam(struct net_device *dev,
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 oldadv, newadv;

+	if (!phydev)
+		return -ENODEV;
+
 	if (!(phydev->supported & SUPPORTED_Pause) ||
 	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
 	    (epause->rx_pause != epause->tx_pause)))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9866f264f55e..f0bbd4246d71 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -186,7 +186,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
 	u16 phy_reg = 0;
 	u32 phy_id = 0;
-	s32 ret_val;
+	s32 ret_val = 0;
 	u16 retry_count;
 	u32 mac_reg = 0;

@@ -217,11 +217,13 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 	/* In case the PHY needs to be in mdio slow mode,
 	 * set slow mode and try to get the PHY id again.
 	 */
-	hw->phy.ops.release(hw);
-	ret_val = e1000_set_mdio_slow_mode_hv(hw);
-	if (!ret_val)
-		ret_val = e1000e_get_phy_id(hw);
-	hw->phy.ops.acquire(hw);
+	if (hw->mac.type < e1000_pch_lpt) {
+		hw->phy.ops.release(hw);
+		ret_val = e1000_set_mdio_slow_mode_hv(hw);
+		if (!ret_val)
+			ret_val = e1000e_get_phy_id(hw);
+		hw->phy.ops.acquire(hw);
+	}

 	if (ret_val)
 		return false;
@@ -842,6 +844,17 @@ s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 		}
 	}

+	if (hw->phy.type == e1000_phy_82579) {
+		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+						    &data);
+		if (ret_val)
+			goto release;
+
+		data &= ~I82579_LPI_100_PLL_SHUT;
+		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+						     data);
+	}
+
 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
 	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
 	if (ret_val)
@@ -1314,14 +1327,17 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 		return ret_val;
 	}

-	/* When connected at 10Mbps half-duplex, 82579 parts are excessively
+	/* When connected at 10Mbps half-duplex, some parts are excessively
 	 * aggressive resulting in many collisions. To avoid this, increase
 	 * the IPG and reduce Rx latency in the PHY.
 	 */
-	if ((hw->mac.type == e1000_pch2lan) && link) {
+	if (((hw->mac.type == e1000_pch2lan) ||
+	     (hw->mac.type == e1000_pch_lpt)) && link) {
 		u32 reg;
 		reg = er32(STATUS);
 		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+			u16 emi_addr;
+
 			reg = er32(TIPG);
 			reg &= ~E1000_TIPG_IPGT_MASK;
 			reg |= 0xFF;
@@ -1332,8 +1348,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 			if (ret_val)
 				return ret_val;

-			ret_val =
-			    e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
+			if (hw->mac.type == e1000_pch2lan)
+				emi_addr = I82579_RX_CONFIG;
+			else
+				emi_addr = I217_RX_CONFIG;
+
+			ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);

 			hw->phy.ops.release(hw);

@@ -2493,51 +2513,44 @@ release:
  * e1000_k1_gig_workaround_lv - K1 Si workaround
  * @hw: pointer to the HW structure
  *
- * Workaround to set the K1 beacon duration for 82579 parts
+ * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
+ * Disable K1 in 1000Mbps and 100Mbps
  **/
 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
 	u16 status_reg = 0;
-	u32 mac_reg;
-	u16 phy_reg;

 	if (hw->mac.type != e1000_pch2lan)
 		return 0;

-	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
+	/* Set K1 beacon duration based on 10Mbs speed */
 	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
 	if (ret_val)
 		return ret_val;

 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
-		mac_reg = er32(FEXTNVM4);
-		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
-
-		ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
-		if (ret_val)
-			return ret_val;
-
-		if (status_reg & HV_M_STATUS_SPEED_1000) {
+		if (status_reg &
+		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
 			u16 pm_phy_reg;

-			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
-			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
-			/* LV 1G Packet drop issue wa */
+			/* LV 1G/100 Packet drop issue wa */
 			ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
 			if (ret_val)
 				return ret_val;
-			pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
+			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
 			ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
 			if (ret_val)
 				return ret_val;
 		} else {
+			u32 mac_reg;
+
+			mac_reg = er32(FEXTNVM4);
+			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
-			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+			ew32(FEXTNVM4, mac_reg);
 		}
-		ew32(FEXTNVM4, mac_reg);
-		ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
 	}

 	return ret_val;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index bead50f9187b..5515126c81c1 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -232,16 +232,19 @@
 #define I82577_MSE_THRESHOLD	0x0887	/* 82577 Mean Square Error Threshold */
 #define I82579_MSE_LINK_DOWN	0x2411	/* MSE count before dropping link */
 #define I82579_RX_CONFIG		0x3412	/* Receive configuration */
+#define I82579_LPI_PLL_SHUT		0x4412	/* LPI PLL Shut Enable */
 #define I82579_EEE_PCS_STATUS		0x182E	/* IEEE MMD Register 3.1 >> 8 */
 #define I82579_EEE_CAPABILITY		0x0410	/* IEEE MMD Register 3.20 */
 #define I82579_EEE_ADVERTISEMENT	0x040E	/* IEEE MMD Register 7.60 */
 #define I82579_EEE_LP_ABILITY		0x040F	/* IEEE MMD Register 7.61 */
 #define I82579_EEE_100_SUPPORTED	(1 << 1)	/* 100BaseTx EEE */
 #define I82579_EEE_1000_SUPPORTED	(1 << 2)	/* 1000BaseTx EEE */
+#define I82579_LPI_100_PLL_SHUT	(1 << 2)	/* 100M LPI PLL Shut Enabled */
 #define I217_EEE_PCS_STATUS	0x9401	/* IEEE MMD Register 3.1 */
 #define I217_EEE_CAPABILITY	0x8000	/* IEEE MMD Register 3.20 */
 #define I217_EEE_ADVERTISEMENT	0x8001	/* IEEE MMD Register 7.60 */
 #define I217_EEE_LP_ABILITY	0x8002	/* IEEE MMD Register 7.61 */
+#define I217_RX_CONFIG		0xB20C	/* Receive configuration */

 #define E1000_EEE_RX_LPI_RCVD	0x0400	/* Tx LP idle received */
 #define E1000_EEE_TX_LPI_RCVD	0x0800	/* Rx LP idle received */
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d50c91e50528..3e69386add04 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1165,7 +1165,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
 		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
 		adapter->tx_hwtstamp_skb = NULL;
 		adapter->tx_hwtstamp_timeouts++;
-		e_warn("clearing Tx timestamp hang");
+		e_warn("clearing Tx timestamp hang\n");
 	} else {
 		/* reschedule to check later */
 		schedule_work(&adapter->tx_hwtstamp_work);
@@ -5687,7 +5687,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+	int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;

 	/* Jumbo frame support */
 	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
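/*
 * Illustrative sketch (not from the driver sources): the MTU fix above
 * accounts for a possible 802.1Q tag when converting an MTU into an on-wire
 * frame size. The constants below match standard Ethernet framing.
 */
#include <stdio.h>

#define ETH_HLEN	14	/* dst MAC + src MAC + ethertype */
#define VLAN_HLEN	4	/* 802.1Q tag */
#define ETH_FCS_LEN	4	/* frame check sequence */

int main(void)
{
	int new_mtu = 1500;
	int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;

	/* 1500 + 4 + 14 + 4 = 1522 bytes on the wire for a tagged frame */
	printf("max_frame = %d\n", max_frame);
	return 0;
}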
@@ -6235,6 +6235,7 @@ static int __e1000_resume(struct pci_dev *pdev)
 	return 0;
 }

+#ifdef CONFIG_PM_SLEEP
 static int e1000e_pm_thaw(struct device *dev)
 {
 	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -6255,7 +6256,6 @@ static int e1000e_pm_thaw(struct device *dev)
 	return 0;
 }

-#ifdef CONFIG_PM_SLEEP
 static int e1000e_pm_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 3841bccf058c..537d2780b408 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -164,6 +164,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
 #define HV_M_STATUS_AUTONEG_COMPLETE	0x1000
 #define HV_M_STATUS_SPEED_MASK		0x0300
 #define HV_M_STATUS_SPEED_1000		0x0200
+#define HV_M_STATUS_SPEED_100		0x0100
 #define HV_M_STATUS_LINK_UP		0x0040

 #define IGP01E1000_PHY_PCS_INIT_REG	0x00B4
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 861b722c2672..cf0761f08911 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2897,12 +2897,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
 		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

 		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
-			ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
 			i40e_ptp_tx_hwtstamp(pf);
-			prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
 		}
-
-		wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
 	}

 	/* If a critical error is pending we have no choice but to reset the
@@ -4271,6 +4268,14 @@ static int i40e_open(struct net_device *netdev)
 	if (err)
 		return err;

+	/* configure global TSO hardware offload settings */
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
+						       TCP_FLAG_FIN) >> 16);
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
+						       TCP_FLAG_FIN |
+						       TCP_FLAG_CWR) >> 16);
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
+
 #ifdef CONFIG_I40E_VXLAN
 	vxlan_get_rx_port(netdev);
 #endif
@@ -6712,6 +6717,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 			   NETIF_F_HW_VLAN_CTAG_FILTER |
 			   NETIF_F_IPV6_CSUM |
 			   NETIF_F_TSO |
+			   NETIF_F_TSO_ECN |
 			   NETIF_F_TSO6 |
 			   NETIF_F_RXCSUM |
 			   NETIF_F_NTUPLE |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 262bdf11d221..81299189a47d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -160,7 +160,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 		udelay(5);
 	}
 	if (ret_code == I40E_ERR_TIMEOUT)
-		hw_dbg(hw, "Done bit in GLNVM_SRCTL not set");
+		hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
 	return ret_code;
 }

diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e33ec6c842b7..e61e63720800 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -239,7 +239,7 @@ static void i40e_ptp_tx_work(struct work_struct *work)
 		dev_kfree_skb_any(pf->ptp_tx_skb);
 		pf->ptp_tx_skb = NULL;
 		pf->tx_hwtstamp_timeouts++;
-		dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang");
+		dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
 		return;
 	}

@@ -321,7 +321,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
 		pf->last_rx_ptp_check = jiffies;
 		pf->rx_hwtstamp_cleared++;
 		dev_warn(&vsi->back->pdev->dev,
-			 "%s: clearing Rx timestamp hang",
+			 "%s: clearing Rx timestamp hang\n",
 			 __func__);
 	}
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0f5d96ad281d..9478ddc66caf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -418,7 +418,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
 		}
 		break;
 	default:
-		dev_info(&pf->pdev->dev, "Could not specify spec type %d",
+		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
 			 input->flow_type);
 		ret = -EINVAL;
 	}
@@ -478,7 +478,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 			pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
 		}
 	} else {
-		dev_info(&pdev->dev, "FD filter programming error");
+		dev_info(&pdev->dev, "FD filter programming error\n");
 	}
 } else if (error ==
 	   (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
@@ -1713,9 +1713,11 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 			    I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
 		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
 			struct vlan_ethhdr *vhdr;
-			if (skb_header_cloned(skb) &&
-			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-				return -ENOMEM;
+			int rc;
+
+			rc = skb_cow_head(skb, 0);
+			if (rc < 0)
+				return rc;
 			vhdr = (struct vlan_ethhdr *)skb->data;
 			vhdr->h_vlan_TCI = htons(tx_flags >>
 						 I40E_TX_FLAGS_VLAN_SHIFT);
@@ -1743,20 +1745,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
 {
 	u32 cd_cmd, cd_tso_len, cd_mss;
+	struct ipv6hdr *ipv6h;
 	struct tcphdr *tcph;
 	struct iphdr *iph;
 	u32 l4len;
 	int err;
-	struct ipv6hdr *ipv6h;

 	if (!skb_is_gso(skb))
 		return 0;

-	if (skb_header_cloned(skb)) {
-		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-		if (err)
-			return err;
-	}
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;

 	if (protocol == htons(ETH_P_IP)) {
 		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
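/*
 * Illustrative sketch (not from the driver sources): skb_cow_head() bundles
 * the "is the header shared, and if so reallocate it" dance that i40e used to
 * open-code with skb_header_cloned() + pskb_expand_head(). It returns 0 on
 * success or a negative errno; 0 means the headers may now be edited in
 * place. This fragment assumes a kernel build context.
 */
#include <linux/skbuff.h>

static int example_prepare_header_edit(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);	/* 0 = no extra headroom required */
	if (err < 0)
		return err;		/* e.g. -ENOMEM */

	/* safe to rewrite VLAN tag / TCP header fields here */
	return 0;
}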
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index db963397cc27..f67f8a170b90 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -365,7 +365,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
 		word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
 		if (word_address == address) {
 			*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
-			hw_dbg("Read INVM Word 0x%02x = %x",
+			hw_dbg("Read INVM Word 0x%02x = %x\n",
 			       address, *data);
 			status = E1000_SUCCESS;
 			break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 5910a932ea7c..1e0c404db81a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -929,11 +929,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 		 */
 		if (hw->fc.requested_mode == e1000_fc_full) {
 			hw->fc.current_mode = e1000_fc_full;
-			hw_dbg("Flow Control = FULL.\r\n");
+			hw_dbg("Flow Control = FULL.\n");
 		} else {
 			hw->fc.current_mode = e1000_fc_rx_pause;
-			hw_dbg("Flow Control = "
-			       "RX PAUSE frames only.\r\n");
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
 		}
 	}
 	/* For receiving PAUSE frames ONLY.
@@ -948,7 +947,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 	    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 	    (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
 		hw->fc.current_mode = e1000_fc_tx_pause;
-		hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
+		hw_dbg("Flow Control = TX PAUSE frames only.\n");
 	}
 	/* For transmitting PAUSE frames ONLY.
 	 *
@@ -962,7 +961,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 	    !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 	    (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
 		hw->fc.current_mode = e1000_fc_rx_pause;
-		hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+		hw_dbg("Flow Control = RX PAUSE frames only.\n");
 	}
 	/* Per the IEEE spec, at this point flow control should be
 	 * disabled. However, we want to consider that we could
@@ -988,10 +987,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 		 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
 		 (hw->fc.strict_ieee)) {
 		hw->fc.current_mode = e1000_fc_none;
-		hw_dbg("Flow Control = NONE.\r\n");
+		hw_dbg("Flow Control = NONE.\n");
 	} else {
 		hw->fc.current_mode = e1000_fc_rx_pause;
-		hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+		hw_dbg("Flow Control = RX PAUSE frames only.\n");
 	}

 	/* Now we need to do one last check... If we auto-
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fb98d4602f9d..16430a8440fa 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5193,8 +5193,10 @@ void igb_update_stats(struct igb_adapter *adapter,
 
 	rcu_read_lock();
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		u32 rqdpc = rd32(E1000_RQDPC(i));
 		struct igb_ring *ring = adapter->rx_ring[i];
+		u32 rqdpc = rd32(E1000_RQDPC(i));
+		if (hw->mac.type >= e1000_i210)
+			wr32(E1000_RQDPC(i), 0);

 		if (rqdpc) {
 			ring->rx_stats.drops += rqdpc;
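/*
 * Illustrative sketch (not from the driver sources): on i210-class parts the
 * RQDPC drop counter is read/write rather than read-to-clear, so the hunk
 * above writes 0 back after each read. The generic read-then-reset
 * accumulation looks like this; the register access is simulated.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_rqdpc = 7;	/* stands in for the hardware register */

static uint32_t read_reg(void) { return fake_rqdpc; }
static void write_reg(uint32_t v) { fake_rqdpc = v; }

int main(void)
{
	uint64_t total_drops = 0;
	uint32_t rqdpc = read_reg();

	write_reg(0);		/* reset so the next read is a fresh delta */
	if (rqdpc)
		total_drops += rqdpc;
	printf("drops: %llu\n", (unsigned long long)total_drops);
	return 0;
}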
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 9209d652e1c9..ab25e49365f7 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -389,7 +389,7 @@ static void igb_ptp_tx_work(struct work_struct *work)
 		adapter->ptp_tx_skb = NULL;
 		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
 		adapter->tx_hwtstamp_timeouts++;
-		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
 		return;
 	}

@@ -451,7 +451,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
 		rd32(E1000_RXSTMPH);
 		adapter->last_rx_ptp_check = jiffies;
 		adapter->rx_hwtstamp_cleared++;
-		dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+		dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
 	}
 }

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 1a12c1dd7a27..c6c4ca7d68e6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -256,7 +256,6 @@ struct ixgbe_ring {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
-	unsigned long last_rx_timestamp;
 	unsigned long state;
 	u8 __iomem *tail;
 	dma_addr_t dma;			/* phys. address of descriptor ring */
@@ -770,6 +769,7 @@ struct ixgbe_adapter {
 	unsigned long ptp_tx_start;
 	unsigned long last_overflow_check;
 	unsigned long last_rx_ptp_check;
+	unsigned long last_rx_timestamp;
 	spinlock_t tmreg_lock;
 	struct cyclecounter cc;
 	struct timecounter tc;
@@ -944,24 +944,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-			     struct sk_buff *skb);
-static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
-					 union ixgbe_adv_rx_desc *rx_desc,
-					 struct sk_buff *skb)
-{
-	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
-		return;
-
-	__ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
-
-	/*
-	 * Update the last_rx_timestamp timer in order to enable watchdog check
-	 * for error case of latched timestamp on a dropped packet.
-	 */
-	rx_ring->last_rx_timestamp = jiffies;
-}
-
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb);
 int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 24fba39e194e..981b8a7b100d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1195,7 +1195,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
 	 */
 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

-	hw_dbg(hw, "Detected EEPROM page size = %d words.",
+	hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
 	       hw->eeprom.word_page_size);
 out:
 	return status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c4c526b7f99f..d62e7a25cf97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1664,7 +1664,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 
 	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

-	ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
+		ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);

 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 23f765263f12..a76af8e28a04 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -536,7 +536,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 
 	if (time_out == max_time_out) {
 		status = IXGBE_ERR_LINK_SETUP;
-		hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
+		hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
 	}

 	return status;
@@ -745,7 +745,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 
 	if (time_out == max_time_out) {
 		status = IXGBE_ERR_LINK_SETUP;
-		hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
+		hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
 	}

 	return status;
@@ -1175,7 +1175,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 		status = 0;
 	} else {
 		if (hw->allow_unsupported_sfp) {
-			e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.");
+			e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
 			status = 0;
 		} else {
 			hw_dbg(hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 63515a6f67fa..8902ae683457 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -435,10 +435,8 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct ixgbe_ring *rx_ring;
 	u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
 	unsigned long rx_event;
-	int n;

 	/* if we don't have a valid timestamp in the registers, just update the
 	 * timeout counter and exit
@@ -450,18 +448,15 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
 
 	/* determine the most recent watchdog or rx_timestamp event */
 	rx_event = adapter->last_rx_ptp_check;
-	for (n = 0; n < adapter->num_rx_queues; n++) {
-		rx_ring = adapter->rx_ring[n];
-		if (time_after(rx_ring->last_rx_timestamp, rx_event))
-			rx_event = rx_ring->last_rx_timestamp;
-	}
+	if (time_after(adapter->last_rx_timestamp, rx_event))
+		rx_event = adapter->last_rx_timestamp;

 	/* only need to read the high RXSTMP register to clear the lock */
 	if (time_is_before_jiffies(rx_event + 5*HZ)) {
 		IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
 		adapter->last_rx_ptp_check = jiffies;

-		e_warn(drv, "clearing RX Timestamp hang");
+		e_warn(drv, "clearing RX Timestamp hang\n");
 	}
 }

@@ -517,7 +512,7 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
 		dev_kfree_skb_any(adapter->ptp_tx_skb);
 		adapter->ptp_tx_skb = NULL;
 		clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
-		e_warn(drv, "clearing Tx Timestamp hang");
+		e_warn(drv, "clearing Tx Timestamp hang\n");
 		return;
 	}

@@ -530,35 +525,22 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
 }

 /**
- * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
- * @q_vector: structure containing interrupt and ring information
+ * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @adapter: pointer to adapter struct
  * @skb: particular skb to send timestamp with
  *
  * if the timestamp is valid, we convert it into the timecounter ns
  * value, then store that result into the shhwtstamps structure which
  * is passed up the network stack
  */
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-			     struct sk_buff *skb)
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 {
-	struct ixgbe_adapter *adapter;
-	struct ixgbe_hw *hw;
+	struct ixgbe_hw *hw = &adapter->hw;
 	struct skb_shared_hwtstamps *shhwtstamps;
 	u64 regval = 0, ns;
 	u32 tsyncrxctl;
 	unsigned long flags;

-	/* we cannot process timestamps on a ring without a q_vector */
-	if (!q_vector || !q_vector->adapter)
-		return;
-
-	adapter = q_vector->adapter;
-	hw = &adapter->hw;
-
-	/*
-	 * Read the tsyncrxctl register afterwards in order to prevent taking an
-	 * I/O hit on every packet.
-	 */
 	tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
 	if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
 		return;
@@ -566,13 +548,17 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
 	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
 	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;

-
 	spin_lock_irqsave(&adapter->tmreg_lock, flags);
 	ns = timecounter_cyc2time(&adapter->tc, regval);
 	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

 	shhwtstamps = skb_hwtstamps(skb);
 	shhwtstamps->hwtstamp = ns_to_ktime(ns);
+
+	/* Update the last_rx_timestamp timer in order to enable watchdog check
+	 * for error case of latched timestamp on a dropped packet.
+	 */
+	adapter->last_rx_timestamp = jiffies;
 }

 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
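/*
 * Illustrative sketch (not from the driver sources): the ixgbe change moves
 * last_rx_timestamp from per-ring to per-adapter because the RX timestamp
 * registers are global to the device -- one latched timestamp, one watchdog
 * stamp. The stale-timestamp watchdog pattern, with wall-clock seconds
 * standing in for jiffies and 5 s for the 5*HZ window:
 */
#include <stdio.h>
#include <time.h>

struct adapter_ts {
	time_t last_rx_timestamp;	/* stamped on every latched timestamp */
	time_t last_check;
};

static void rx_hang_check(struct adapter_ts *a)
{
	time_t rx_event = a->last_check;

	if (a->last_rx_timestamp > rx_event)	/* like time_after() */
		rx_event = a->last_rx_timestamp;

	if (time(NULL) > rx_event + 5) {
		/* the driver reads RXSTMPH here to unlatch the stuck stamp */
		a->last_check = time(NULL);
		printf("clearing RX Timestamp hang\n");
	}
}

int main(void)
{
	struct adapter_ts a = { 0, 0 };

	rx_hang_check(&a);	/* stale by construction: warns and re-arms */
	return 0;
}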
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b0c6050479eb..b78378cea5e3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
 	return idx;
 }

-static void
+static int
 jme_fill_tx_map(struct pci_dev *pdev,
 		struct txdesc *txdesc,
 		struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
 				len,
 				PCI_DMA_TODEVICE);

+	if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+		return -EINVAL;
+
 	pci_dma_sync_single_for_device(pdev,
 				       dmaaddr,
 				       len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
 
 	txbi->mapping = dmaaddr;
 	txbi->len = len;
+	return 0;
 }

-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
+{
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+	int mask = jme->tx_ring_mask;
+	int j;
+
+	for (j = 0 ; j < count ; j++) {
+		ctxbi = txbi + ((startidx + j + 2) & (mask));
+		pci_unmap_page(jme->pdev,
+			       ctxbi->mapping,
+			       ctxbi->len,
+			       PCI_DMA_TODEVICE);
+
+		ctxbi->mapping = 0;
+		ctxbi->len = 0;
+	}
+
+}
+
+static int
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
 	struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	int mask = jme->tx_ring_mask;
 	const struct skb_frag_struct *frag;
 	u32 len;
+	int ret = 0;

 	for (i = 0 ; i < nr_frags ; ++i) {
 		frag = &skb_shinfo(skb)->frags[i];
 		ctxdesc = txdesc + ((idx + i + 2) & (mask));
 		ctxbi = txbi + ((idx + i + 2) & (mask));

-		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
-				skb_frag_page(frag),
-				frag->page_offset, skb_frag_size(frag), hidma);
+		ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+				      skb_frag_page(frag),
+				      frag->page_offset, skb_frag_size(frag), hidma);
+		if (ret) {
+			jme_drop_tx_map(jme, idx, i);
+			goto out;
+		}
+
 	}

 	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
 	ctxdesc = txdesc + ((idx + 1) & (mask));
 	ctxbi = txbi + ((idx + 1) & (mask));
-	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
-			offset_in_page(skb->data), len, hidma);
+	ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+			      offset_in_page(skb->data), len, hidma);
+	if (ret)
+		jme_drop_tx_map(jme, idx, i);
+
+out:
+	return ret;

 }

+
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	struct txdesc *txdesc;
 	struct jme_buffer_info *txbi;
 	u8 flags;
+	int ret = 0;

 	txdesc = (struct txdesc *)txring->desc + idx;
 	txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
 		jme_tx_csum(jme, skb, &flags);
 	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
-	jme_map_tx_skb(jme, skb, idx);
+	ret = jme_map_tx_skb(jme, skb, idx);
+	if (ret)
+		return ret;
+
 	txdesc->desc1.flags = flags;
 	/*
 	 * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_BUSY;
 	}

-	jme_fill_tx_desc(jme, skb, idx);
+	if (jme_fill_tx_desc(jme, skb, idx))
+		return NETDEV_TX_OK;

 	jwrite32(jme, JME_TXCS, jme->reg_txcs |
 		 TXCS_SELECT_QUEUE0 |
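/*
 * Illustrative sketch (not from the driver sources): the jme fix checks every
 * DMA mapping for failure and, on error, unwinds the mappings already made
 * before reporting failure upward, so no partial state leaks. The same shape,
 * with the mapping calls simulated:
 */
#include <stdio.h>

#define NFRAGS 4

static int map_one(int i)
{
	return (i == 2) ? -1 : 0;	/* simulate a failure on frag 2 */
}

static void unmap_one(int i)
{
	printf("unmapped frag %d\n", i);
}

static int map_all(void)
{
	int i;

	for (i = 0; i < NFRAGS; i++) {
		if (map_one(i) < 0) {
			while (--i >= 0)	/* unwind in reverse order */
				unmap_one(i);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	if (map_all() < 0)
		printf("mapping failed; no partial state left behind\n");
	return 0;
}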
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index b161a525fc5b..9d5ced263a5e 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -232,7 +232,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
 	clk_prepare_enable(dev->clk);

 	dev->err_interrupt = platform_get_irq(pdev, 0);
-	if (dev->err_interrupt != -ENXIO) {
+	if (dev->err_interrupt > 0) {
 		ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
 				       orion_mdio_err_irq,
 				       IRQF_SHARED, pdev->name, dev);
@@ -241,6 +241,9 @@ static int orion_mdio_probe(struct platform_device *pdev)
 
 		writel(MVMDIO_ERR_INT_SMI_DONE,
 		       dev->regs + MVMDIO_ERR_INT_MASK);
+
+	} else if (dev->err_interrupt == -EPROBE_DEFER) {
+		return -EPROBE_DEFER;
 	}

 	mutex_init(&dev->lock);
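/*
 * Illustrative sketch (not from the driver sources): platform_get_irq() can
 * return a usable IRQ number (> 0), -ENXIO when the device simply has no such
 * interrupt, or -EPROBE_DEFER when the interrupt parent is not ready yet.
 * Only the deferral should abort the probe; "no IRQ" falls back to polled
 * operation. EPROBE_DEFER is a kernel-private errno, defined here only so
 * this userspace sketch builds.
 */
#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel value; not in userspace errno.h */

static int probe(int irq)
{
	if (irq > 0)
		printf("requesting error interrupt %d\n", irq);
	else if (irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* re-run the whole probe later */
	/* any other error (such as -ENXIO): continue in polled mode */
	return 0;
}

int main(void)
{
	printf("irq 5             -> %d\n", probe(5));
	printf("irq -ENXIO        -> %d\n", probe(-ENXIO));
	printf("irq -EPROBE_DEFER -> %d\n", probe(-EPROBE_DEFER));
	return 0;
}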
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d04b1c3c9b85..14786c8bf99e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -91,7 +91,7 @@
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 #define MVNETA_SERDES_CFG			 0x24A0
 #define MVNETA_SGMII_SERDES_PROTO		 0x0cc7
-#define MVNETA_RGMII_SERDES_PROTO		 0x0667
+#define MVNETA_QSGMII_SERDES_PROTO		 0x0667
 #define MVNETA_TYPE_PRIO                         0x24bc
 #define MVNETA_FORCE_UNI                         BIT(21)
 #define MVNETA_TXQ_CMD_1                         0x24e4
@@ -2721,29 +2721,44 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
 }

 /* Power up the port */
-static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 {
-	u32 val;
+	u32 ctrl;

 	/* MAC Cause register should be cleared */
 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

-	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
-		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-	else
-		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
+	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

-	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
-	val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+	/* Even though it might look weird, when we're configured in
+	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
+	 */
+	switch(phy_mode) {
+	case PHY_INTERFACE_MODE_QSGMII:
+		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
+		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+		ctrl |= MVNETA_GMAC2_PORT_RGMII;
+		break;
+	default:
+		return -EINVAL;
+	}

 	/* Cancel Port Reset */
-	val &= ~MVNETA_GMAC2_PORT_RESET;
-	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
+	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

 	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
 		MVNETA_GMAC2_PORT_RESET) != 0)
 		continue;
+
+	return 0;
 }

 /* Device initialization routine */
@@ -2854,7 +2869,12 @@ static int mvneta_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "can't init eth hal\n");
 		goto err_free_stats;
 	}
-	mvneta_port_power_up(pp, phy_mode);
+
+	err = mvneta_port_power_up(pp, phy_mode);
+	if (err < 0) {
+		dev_err(&pdev->dev, "can't power up port\n");
+		goto err_deinit;
+	}

 	dram_target_info = mv_mbus_dram_info();
 	if (dram_target_info)
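/*
 * Illustrative sketch (not from the driver sources): mvneta_port_power_up()
 * now validates the phy-mode in a switch and returns -EINVAL for anything it
 * cannot drive, and probe propagates that instead of silently continuing.
 * The validate-then-propagate shape:
 */
#include <errno.h>
#include <stdio.h>

enum phy_mode { MODE_QSGMII, MODE_SGMII, MODE_RGMII, MODE_UNKNOWN };

static int port_power_up(enum phy_mode mode)
{
	switch (mode) {
	case MODE_QSGMII:
	case MODE_SGMII:
	case MODE_RGMII:
		return 0;	/* program SERDES/MAC for the mode */
	default:
		return -EINVAL;	/* unsupported: fail the probe */
	}
}

int main(void)
{
	int err = port_power_up(MODE_UNKNOWN);

	if (err < 0)
		fprintf(stderr, "can't power up port (%d)\n", err);
	return err ? 1 : 0;
}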
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78099eab7673..92d3249f63f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
 	},
 	{
 		.opcode = MLX4_CMD_UPDATE_QP,
-		.has_inbox = false,
+		.has_inbox = true,
 		.has_outbox = false,
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_CMD_EPERM_wrapper
+		.wrapper = mlx4_UPDATE_QP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_GET_OP_REQ,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 70e95324a97d..c2cd8d31bcad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,7 +66,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 
 	cq->ring = ring;
 	cq->is_tx = mode;
-	spin_lock_init(&cq->lock);

 	/* Allocate HW buffers on provided NUMA node.
 	 * dev->numa_node is used in mtt range allocation flow.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f085c2df5e69..7e4b1720c3d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1304,15 +1304,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_cq *cq;
-	unsigned long flags;
 	int i;

 	for (i = 0; i < priv->rx_ring_num; i++) {
 		cq = priv->rx_cq[i];
-		spin_lock_irqsave(&cq->lock, flags);
-		napi_synchronize(&cq->napi);
-		mlx4_en_process_rx_cq(dev, cq, 0);
-		spin_unlock_irqrestore(&cq->lock, flags);
+		napi_schedule(&cq->napi);
 	}
 }
 #endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f0ae95f66ceb..7cf9dadcb471 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -754,10 +754,10 @@ static void mlx4_request_modules(struct mlx4_dev *dev)
 		has_eth_port = true;
 	}

-	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
-		request_module_nowait(IB_DRV_NAME);
 	if (has_eth_port)
 		request_module_nowait(EN_DRV_NAME);
+	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+		request_module_nowait(IB_DRV_NAME);
 }

 /*
@@ -2301,13 +2301,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 	/* Allow large DMA segments, up to the firmware limit of 1 GB */
 	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		err = -ENOMEM;
-		goto err_release_regions;
-	}
-
-	dev = &priv->dev;
+	dev = pci_get_drvdata(pdev);
+	priv = mlx4_priv(dev);
 	dev->pdev = pdev;
 	INIT_LIST_HEAD(&priv->ctx_list);
 	spin_lock_init(&priv->ctx_lock);
@@ -2374,10 +2369,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2374 } else { 2369 } else {
2375 atomic_inc(&pf_loading); 2370 atomic_inc(&pf_loading);
2376 err = pci_enable_sriov(pdev, total_vfs); 2371 err = pci_enable_sriov(pdev, total_vfs);
2377 atomic_dec(&pf_loading);
2378 if (err) { 2372 if (err) {
2379 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", 2373 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2380 err); 2374 err);
2375 atomic_dec(&pf_loading);
2381 err = 0; 2376 err = 0;
2382 } else { 2377 } else {
2383 mlx4_warn(dev, "Running in master mode\n"); 2378 mlx4_warn(dev, "Running in master mode\n");
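The pf_loading hunks in this file move the atomic_dec() out of the immediate pci_enable_sriov() path: the counter now stays elevated for the whole of the PF's probe and is dropped either at the end of __mlx4_init_one() or in its error path, so VF probes kicked off by pci_enable_sriov() reliably see that the PF is still initializing. A condensed, illustrative view (the example_ helper is not in the patch):

static int example_pf_enable_sriov(struct pci_dev *pdev, int total_vfs)
{
	int err;

	atomic_inc(&pf_loading);
	err = pci_enable_sriov(pdev, total_vfs);
	if (err) {
		/* No VF will ever probe: drop the reference now and
		 * continue without SR-IOV.
		 */
		atomic_dec(&pf_loading);
		return 0;
	}

	/* Success: keep pf_loading elevated; __mlx4_init_one() drops it
	 * once the PF is fully initialized (or fails and unwinds).
	 */
	return 0;
}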
@@ -2445,7 +2440,8 @@ slave_start:
2445 * No return code for this call, just warn the user in case of PCI 2440 * No return code for this call, just warn the user in case of PCI
2446 * express device capabilities are under-satisfied by the bus. 2441 * express device capabilities are under-satisfied by the bus.
2447 */ 2442 */
2448 mlx4_check_pcie_caps(dev); 2443 if (!mlx4_is_slave(dev))
2444 mlx4_check_pcie_caps(dev);
2449 2445
2450 /* In master functions, the communication channel must be initialized 2446 /* In master functions, the communication channel must be initialized
2451 * after obtaining its address from fw */ 2447 * after obtaining its address from fw */
@@ -2535,8 +2531,10 @@ slave_start:
2535 mlx4_sense_init(dev); 2531 mlx4_sense_init(dev);
2536 mlx4_start_sense(dev); 2532 mlx4_start_sense(dev);
2537 2533
2538 priv->pci_dev_data = pci_dev_data; 2534 priv->removed = 0;
2539 pci_set_drvdata(pdev, dev); 2535
2536 if (mlx4_is_master(dev) && dev->num_vfs)
2537 atomic_dec(&pf_loading);
2540 2538
2541 return 0; 2539 return 0;
2542 2540
@@ -2588,6 +2586,9 @@ err_rel_own:
2588 if (!mlx4_is_slave(dev)) 2586 if (!mlx4_is_slave(dev))
2589 mlx4_free_ownership(dev); 2587 mlx4_free_ownership(dev);
2590 2588
2589 if (mlx4_is_master(dev) && dev->num_vfs)
2590 atomic_dec(&pf_loading);
2591
2591 kfree(priv->dev.dev_vfs); 2592 kfree(priv->dev.dev_vfs);
2592 2593
2593err_free_dev: 2594err_free_dev:
@@ -2604,85 +2605,110 @@ err_disable_pdev:
2604 2605
2605static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 2606static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
2606{ 2607{
2608 struct mlx4_priv *priv;
2609 struct mlx4_dev *dev;
2610
2607 printk_once(KERN_INFO "%s", mlx4_version); 2611 printk_once(KERN_INFO "%s", mlx4_version);
2608 2612
2613 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2614 if (!priv)
2615 return -ENOMEM;
2616
2617 dev = &priv->dev;
2618 pci_set_drvdata(pdev, dev);
2619 priv->pci_dev_data = id->driver_data;
2620
2609 return __mlx4_init_one(pdev, id->driver_data); 2621 return __mlx4_init_one(pdev, id->driver_data);
2610} 2622}
2611 2623
2612static void mlx4_remove_one(struct pci_dev *pdev) 2624static void __mlx4_remove_one(struct pci_dev *pdev)
2613{ 2625{
2614 struct mlx4_dev *dev = pci_get_drvdata(pdev); 2626 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2615 struct mlx4_priv *priv = mlx4_priv(dev); 2627 struct mlx4_priv *priv = mlx4_priv(dev);
2628 int pci_dev_data;
2616 int p; 2629 int p;
2617 2630
2618 if (dev) { 2631 if (priv->removed)
2619 /* in SRIOV it is not allowed to unload the pf's 2632 return;
2620 * driver while there are alive vf's */
2621 if (mlx4_is_master(dev)) {
2622 if (mlx4_how_many_lives_vf(dev))
2623 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
2624 }
2625 mlx4_stop_sense(dev);
2626 mlx4_unregister_device(dev);
2627 2633
2628 for (p = 1; p <= dev->caps.num_ports; p++) { 2634 pci_dev_data = priv->pci_dev_data;
2629 mlx4_cleanup_port_info(&priv->port[p]);
2630 mlx4_CLOSE_PORT(dev, p);
2631 }
2632 2635
2633 if (mlx4_is_master(dev)) 2636 /* in SRIOV it is not allowed to unload the pf's
2634 mlx4_free_resource_tracker(dev, 2637 * driver while there are alive vf's */
2635 RES_TR_FREE_SLAVES_ONLY); 2638 if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
2636 2639 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
2637 mlx4_cleanup_counters_table(dev); 2640 mlx4_stop_sense(dev);
2638 mlx4_cleanup_qp_table(dev); 2641 mlx4_unregister_device(dev);
2639 mlx4_cleanup_srq_table(dev);
2640 mlx4_cleanup_cq_table(dev);
2641 mlx4_cmd_use_polling(dev);
2642 mlx4_cleanup_eq_table(dev);
2643 mlx4_cleanup_mcg_table(dev);
2644 mlx4_cleanup_mr_table(dev);
2645 mlx4_cleanup_xrcd_table(dev);
2646 mlx4_cleanup_pd_table(dev);
2647 2642
2648 if (mlx4_is_master(dev)) 2643 for (p = 1; p <= dev->caps.num_ports; p++) {
2649 mlx4_free_resource_tracker(dev, 2644 mlx4_cleanup_port_info(&priv->port[p]);
2650 RES_TR_FREE_STRUCTS_ONLY); 2645 mlx4_CLOSE_PORT(dev, p);
2651 2646 }
2652 iounmap(priv->kar); 2647
2653 mlx4_uar_free(dev, &priv->driver_uar); 2648 if (mlx4_is_master(dev))
2654 mlx4_cleanup_uar_table(dev); 2649 mlx4_free_resource_tracker(dev,
2655 if (!mlx4_is_slave(dev)) 2650 RES_TR_FREE_SLAVES_ONLY);
2656 mlx4_clear_steering(dev); 2651
2657 mlx4_free_eq_table(dev); 2652 mlx4_cleanup_counters_table(dev);
2658 if (mlx4_is_master(dev)) 2653 mlx4_cleanup_qp_table(dev);
2659 mlx4_multi_func_cleanup(dev); 2654 mlx4_cleanup_srq_table(dev);
2660 mlx4_close_hca(dev); 2655 mlx4_cleanup_cq_table(dev);
2661 if (mlx4_is_slave(dev)) 2656 mlx4_cmd_use_polling(dev);
2662 mlx4_multi_func_cleanup(dev); 2657 mlx4_cleanup_eq_table(dev);
2663 mlx4_cmd_cleanup(dev); 2658 mlx4_cleanup_mcg_table(dev);
2664 2659 mlx4_cleanup_mr_table(dev);
2665 if (dev->flags & MLX4_FLAG_MSI_X) 2660 mlx4_cleanup_xrcd_table(dev);
2666 pci_disable_msix(pdev); 2661 mlx4_cleanup_pd_table(dev);
2667 if (dev->flags & MLX4_FLAG_SRIOV) {
2668 mlx4_warn(dev, "Disabling SR-IOV\n");
2669 pci_disable_sriov(pdev);
2670 }
2671 2662
2672 if (!mlx4_is_slave(dev)) 2663 if (mlx4_is_master(dev))
2673 mlx4_free_ownership(dev); 2664 mlx4_free_resource_tracker(dev,
2665 RES_TR_FREE_STRUCTS_ONLY);
2674 2666
2675 kfree(dev->caps.qp0_tunnel); 2667 iounmap(priv->kar);
2676 kfree(dev->caps.qp0_proxy); 2668 mlx4_uar_free(dev, &priv->driver_uar);
2677 kfree(dev->caps.qp1_tunnel); 2669 mlx4_cleanup_uar_table(dev);
2678 kfree(dev->caps.qp1_proxy); 2670 if (!mlx4_is_slave(dev))
2679 kfree(dev->dev_vfs); 2671 mlx4_clear_steering(dev);
2672 mlx4_free_eq_table(dev);
2673 if (mlx4_is_master(dev))
2674 mlx4_multi_func_cleanup(dev);
2675 mlx4_close_hca(dev);
2676 if (mlx4_is_slave(dev))
2677 mlx4_multi_func_cleanup(dev);
2678 mlx4_cmd_cleanup(dev);
2680 2679
2681 kfree(priv); 2680 if (dev->flags & MLX4_FLAG_MSI_X)
2682 pci_release_regions(pdev); 2681 pci_disable_msix(pdev);
2683 pci_disable_device(pdev); 2682 if (dev->flags & MLX4_FLAG_SRIOV) {
2684 pci_set_drvdata(pdev, NULL); 2683 mlx4_warn(dev, "Disabling SR-IOV\n");
2684 pci_disable_sriov(pdev);
2685 dev->num_vfs = 0;
2685 } 2686 }
2687
2688 if (!mlx4_is_slave(dev))
2689 mlx4_free_ownership(dev);
2690
2691 kfree(dev->caps.qp0_tunnel);
2692 kfree(dev->caps.qp0_proxy);
2693 kfree(dev->caps.qp1_tunnel);
2694 kfree(dev->caps.qp1_proxy);
2695 kfree(dev->dev_vfs);
2696
2697 pci_release_regions(pdev);
2698 pci_disable_device(pdev);
2699 memset(priv, 0, sizeof(*priv));
2700 priv->pci_dev_data = pci_dev_data;
2701 priv->removed = 1;
2702}
2703
2704static void mlx4_remove_one(struct pci_dev *pdev)
2705{
2706 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2707 struct mlx4_priv *priv = mlx4_priv(dev);
2708
2709 __mlx4_remove_one(pdev);
2710 kfree(priv);
2711 pci_set_drvdata(pdev, NULL);
2686} 2712}
2687 2713
2688int mlx4_restart_one(struct pci_dev *pdev) 2714int mlx4_restart_one(struct pci_dev *pdev)
@@ -2692,7 +2718,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
2692 int pci_dev_data; 2718 int pci_dev_data;
2693 2719
2694 pci_dev_data = priv->pci_dev_data; 2720 pci_dev_data = priv->pci_dev_data;
2695 mlx4_remove_one(pdev); 2721 __mlx4_remove_one(pdev);
2696 return __mlx4_init_one(pdev, pci_dev_data); 2722 return __mlx4_init_one(pdev, pci_dev_data);
2697} 2723}
2698 2724
@@ -2747,7 +2773,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
2747static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 2773static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
2748 pci_channel_state_t state) 2774 pci_channel_state_t state)
2749{ 2775{
2750 mlx4_remove_one(pdev); 2776 __mlx4_remove_one(pdev);
2751 2777
2752 return state == pci_channel_io_perm_failure ? 2778 return state == pci_channel_io_perm_failure ?
2753 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; 2779 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -2755,11 +2781,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
2755 2781
2756static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 2782static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
2757{ 2783{
2758 const struct pci_device_id *id; 2784 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2759 int ret; 2785 struct mlx4_priv *priv = mlx4_priv(dev);
2786 int ret;
2760 2787
2761 id = pci_match_id(mlx4_pci_table, pdev); 2788 ret = __mlx4_init_one(pdev, priv->pci_dev_data);
2762 ret = __mlx4_init_one(pdev, id->driver_data);
2763 2789
2764 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 2790 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
2765} 2791}
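Taken together, the main.c hunks split allocation from initialization: mlx4_init_one() now allocates the private struct and sets drvdata exactly once, while __mlx4_remove_one() tears the HCA down but preserves priv, flagged with priv->removed, so the restart and AER paths can re-run __mlx4_init_one() against the same drvdata instead of re-matching the PCI ID table. The restart flow the patch converges on, condensed as a sketch:

int mlx4_restart_one_sketch(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pci_dev_data = priv->pci_dev_data;

	/* Soft removal: cleans up the device but keeps priv allocated,
	 * zeroed, and marked priv->removed = 1, so drvdata stays valid.
	 */
	__mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, pci_dev_data);
}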
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cf8be41abb36..212cea440f90 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -800,6 +800,7 @@ struct mlx4_priv {
800 spinlock_t ctx_lock; 800 spinlock_t ctx_lock;
801 801
802 int pci_dev_data; 802 int pci_dev_data;
803 int removed;
803 804
804 struct list_head pgdir_list; 805 struct list_head pgdir_list;
805 struct mutex pgdir_mutex; 806 struct mutex pgdir_mutex;
@@ -1194,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
1194 struct mlx4_cmd_mailbox *outbox, 1195 struct mlx4_cmd_mailbox *outbox,
1195 struct mlx4_cmd_info *cmd); 1196 struct mlx4_cmd_info *cmd);
1196 1197
1198int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
1199 struct mlx4_vhcr *vhcr,
1200 struct mlx4_cmd_mailbox *inbox,
1201 struct mlx4_cmd_mailbox *outbox,
1202 struct mlx4_cmd_info *cmd);
1203
1197int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, 1204int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
1198 struct mlx4_vhcr *vhcr, 1205 struct mlx4_vhcr *vhcr,
1199 struct mlx4_cmd_mailbox *inbox, 1206 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 7a733c287744..04d9b6fe3e80 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -319,7 +319,6 @@ struct mlx4_en_cq {
319 struct mlx4_cq mcq; 319 struct mlx4_cq mcq;
320 struct mlx4_hwq_resources wqres; 320 struct mlx4_hwq_resources wqres;
321 int ring; 321 int ring;
322 spinlock_t lock;
323 struct net_device *dev; 322 struct net_device *dev;
324 struct napi_struct napi; 323 struct napi_struct napi;
325 int size; 324 int size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index cfcad26ed40f..b5b3549b0c8d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1106,6 +1106,9 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1106 } 1106 }
1107 1107
1108 if (found_ix >= 0) { 1108 if (found_ix >= 0) {
1109 /* Calculate a slave_gid which is the slave number in the gid
1110 * table and not a globally unique slave number.
1111 */
1109 if (found_ix < MLX4_ROCE_PF_GIDS) 1112 if (found_ix < MLX4_ROCE_PF_GIDS)
1110 slave_gid = 0; 1113 slave_gid = 0;
1111 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) * 1114 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
@@ -1118,41 +1121,43 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1118 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) / 1121 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
1119 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1; 1122 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
1120 1123
1124 /* Calculate the globally unique slave id */
1121 if (slave_gid) { 1125 if (slave_gid) {
1122 struct mlx4_active_ports exclusive_ports; 1126 struct mlx4_active_ports exclusive_ports;
1123 struct mlx4_active_ports actv_ports; 1127 struct mlx4_active_ports actv_ports;
1124 struct mlx4_slaves_pport slaves_pport_actv; 1128 struct mlx4_slaves_pport slaves_pport_actv;
1125 unsigned max_port_p_one; 1129 unsigned max_port_p_one;
1126 int num_slaves_before = 1; 1130 int num_vfs_before = 0;
1131 int candidate_slave_gid;
1127 1132
1133 /* Calculate how many VFs are on the previous port, if exists */
1128 for (i = 1; i < port; i++) { 1134 for (i = 1; i < port; i++) {
1129 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); 1135 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1130 set_bit(i, exclusive_ports.ports); 1136 set_bit(i - 1, exclusive_ports.ports);
1131 slaves_pport_actv = 1137 slaves_pport_actv =
1132 mlx4_phys_to_slaves_pport_actv( 1138 mlx4_phys_to_slaves_pport_actv(
1133 dev, &exclusive_ports); 1139 dev, &exclusive_ports);
1134 num_slaves_before += bitmap_weight( 1140 num_vfs_before += bitmap_weight(
1135 slaves_pport_actv.slaves, 1141 slaves_pport_actv.slaves,
1136 dev->num_vfs + 1); 1142 dev->num_vfs + 1);
1137 } 1143 }
1138 1144
1139 if (slave_gid < num_slaves_before) { 1145 /* candidate_slave_gid isn't necessarily the correct slave, but
1140 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); 1146 * it has the same number of ports and is assigned to the same
1141 set_bit(port - 1, exclusive_ports.ports); 1147 * ports as the real slave we're looking for. On dual port VF,
1142 slaves_pport_actv = 1148 * slave_gid = [single port VFs on port <port>] +
1143 mlx4_phys_to_slaves_pport_actv( 1149 * [offset of the current slave from the first dual port VF] +
1144 dev, &exclusive_ports); 1150 * 1 (for the PF).
1145 slave_gid += bitmap_weight( 1151 */
1146 slaves_pport_actv.slaves, 1152 candidate_slave_gid = slave_gid + num_vfs_before;
1147 dev->num_vfs + 1) - 1153
1148 num_slaves_before; 1154 actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
1149 }
1150 actv_ports = mlx4_get_active_ports(dev, slave_gid);
1151 max_port_p_one = find_first_bit( 1155 max_port_p_one = find_first_bit(
1152 actv_ports.ports, dev->caps.num_ports) + 1156 actv_ports.ports, dev->caps.num_ports) +
1153 bitmap_weight(actv_ports.ports, 1157 bitmap_weight(actv_ports.ports,
1154 dev->caps.num_ports) + 1; 1158 dev->caps.num_ports) + 1;
1155 1159
1160 /* Calculate the real slave number */
1156 for (i = 1; i < max_port_p_one; i++) { 1161 for (i = 1; i < max_port_p_one; i++) {
1157 if (i == port) 1162 if (i == port)
1158 continue; 1163 continue;
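A worked example may make the slave_gid arithmetic easier to follow; the concrete numbers below are illustrative assumptions, not values taken from the patch:

/* Assume a 128-entry per-port GID table, MLX4_ROCE_PF_GIDS == 16 and
 * num_vfs == 7, so vf_gids == 112 and the GIDs split evenly, 16 per VF:
 *
 *   vf_gids % num_vfs == 0, vf_gids / num_vfs == 16
 *
 * A match at found_ix == 40 takes the last branch:
 *
 *   slave_gid = ((40 - 16 - 0) / 16) + 0 + 1 == 2
 *
 * i.e. the second VF on this port; the num_vfs_before loop above then
 * turns that per-port index into candidate_slave_gid, a device-global
 * slave number.
 */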
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 61d64ebffd56..fbd32af89c7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -389,6 +389,41 @@ err_icm:
389 389
390EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 390EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
391 391
392#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
393int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
394 enum mlx4_update_qp_attr attr,
395 struct mlx4_update_qp_params *params)
396{
397 struct mlx4_cmd_mailbox *mailbox;
398 struct mlx4_update_qp_context *cmd;
399 u64 pri_addr_path_mask = 0;
400 int err = 0;
401
 402 if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
 403 return -EINVAL;
 404
 405 mailbox = mlx4_alloc_cmd_mailbox(dev);
 406 if (IS_ERR(mailbox))
 407 return PTR_ERR(mailbox);
 408
 409 cmd = (struct mlx4_update_qp_context *)mailbox->buf;
410
411 if (attr & MLX4_UPDATE_QP_SMAC) {
412 pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
413 cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
414 }
415
416 cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
417
418 err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
419 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
420 MLX4_CMD_NATIVE);
421
422 mlx4_free_cmd_mailbox(dev, mailbox);
423 return err;
424}
425EXPORT_SYMBOL_GPL(mlx4_update_qp);
426
392void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) 427void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
393{ 428{
394 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 429 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
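For callers the new export reduces to a single call; a hypothetical usage sketch (mlx4_update_qp() and MLX4_UPDATE_QP_SMAC come from the patch above, the helper name and its use are illustrative):

/* Update only the source-MAC index of a live QP. */
static int example_set_qp_smac(struct mlx4_dev *dev, struct mlx4_qp *qp,
			       u8 smac_index)
{
	struct mlx4_update_qp_params params = {
		.smac_index = smac_index,
	};

	return mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
}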
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 3b5f53ef29b2..8f1254a79832 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3733,6 +3733,25 @@ static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3733 } 3733 }
3734} 3734}
3735 3735
3736static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3737 u8 *gid, enum mlx4_protocol prot)
3738{
3739 int real_port;
3740
3741 if (prot != MLX4_PROT_ETH)
3742 return 0;
3743
3744 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3745 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3746 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3747 if (real_port < 0)
3748 return -EINVAL;
3749 gid[5] = real_port;
3750 }
3751
3752 return 0;
3753}
3754
3736int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 3755int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3737 struct mlx4_vhcr *vhcr, 3756 struct mlx4_vhcr *vhcr,
3738 struct mlx4_cmd_mailbox *inbox, 3757 struct mlx4_cmd_mailbox *inbox,
@@ -3768,6 +3787,10 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3768 if (err) 3787 if (err)
3769 goto ex_detach; 3788 goto ex_detach;
3770 } else { 3789 } else {
3790 err = mlx4_adjust_port(dev, slave, gid, prot);
3791 if (err)
3792 goto ex_put;
3793
3771 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id); 3794 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3772 if (err) 3795 if (err)
3773 goto ex_put; 3796 goto ex_put;
@@ -3872,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
3872 3895
3873} 3896}
3874 3897
3898#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
3899int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
3900 struct mlx4_vhcr *vhcr,
3901 struct mlx4_cmd_mailbox *inbox,
3902 struct mlx4_cmd_mailbox *outbox,
3903 struct mlx4_cmd_info *cmd_info)
3904{
3905 int err;
3906 u32 qpn = vhcr->in_modifier & 0xffffff;
3907 struct res_qp *rqp;
3908 u64 mac;
3909 unsigned port;
3910 u64 pri_addr_path_mask;
3911 struct mlx4_update_qp_context *cmd;
3912 int smac_index;
3913
3914 cmd = (struct mlx4_update_qp_context *)inbox->buf;
3915
3916 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
3917 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
3918 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
3919 return -EPERM;
3920
3921 /* Just change the smac for the QP */
3922 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3923 if (err) {
3924 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
3925 return err;
3926 }
3927
3928 port = (rqp->sched_queue >> 6 & 1) + 1;
3929 smac_index = cmd->qp_context.pri_path.grh_mylmc;
3930 err = mac_find_smac_ix_in_slave(dev, slave, port,
3931 smac_index, &mac);
3932 if (err) {
3933 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
3934 qpn, smac_index);
3935 goto err_mac;
3936 }
3937
3938 err = mlx4_cmd(dev, inbox->dma,
3939 vhcr->in_modifier, 0,
3940 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
3941 MLX4_CMD_NATIVE);
3942 if (err) {
3943 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
3944 goto err_mac;
3945 }
3946
3947err_mac:
3948 put_res(dev, slave, qpn, RES_QP);
3949 return err;
3950}
3951
3875int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 3952int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3876 struct mlx4_vhcr *vhcr, 3953 struct mlx4_vhcr *vhcr,
3877 struct mlx4_cmd_mailbox *inbox, 3954 struct mlx4_cmd_mailbox *inbox,
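One detail of mlx4_UPDATE_QP_wrapper() worth noting: the port is recovered from the QP's own context, bit 6 of sched_queue, rather than trusted from the VF, before the slave's SMAC index is validated. As an illustrative helper (not in the patch):

/* In mlx4 QP contexts, bit 6 of sched_queue selects the physical port:
 * 0 maps to port 1, 1 maps to port 2.
 */
static inline unsigned int example_qp_port(u8 sched_queue)
{
	return ((sched_queue >> 6) & 1) + 1;
}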
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7b52a88923ef..f785d01c7d12 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1719 tx_ring->producer; 1719 tx_ring->producer;
1720} 1720}
1721 1721
1722static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
1723 struct net_device *netdev)
1724{
1725 int err;
1726
1727 netdev->num_tx_queues = adapter->drv_tx_rings;
1728 netdev->real_num_tx_queues = adapter->drv_tx_rings;
1729
1730 err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
1731 if (err)
1732 netdev_err(netdev, "failed to set %d Tx queues\n",
1733 adapter->drv_tx_rings);
1734
1735 return err;
1736}
1737
1738struct qlcnic_nic_template { 1722struct qlcnic_nic_template {
1739 int (*config_bridged_mode) (struct qlcnic_adapter *, u32); 1723 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1740 int (*config_led) (struct qlcnic_adapter *, u32, u32); 1724 int (*config_led) (struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b48737dcd3c5..ba20c721ee97 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2139,8 +2139,6 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
2139 ahw->max_mac_filters = nic_info.max_mac_filters; 2139 ahw->max_mac_filters = nic_info.max_mac_filters;
2140 ahw->max_mtu = nic_info.max_mtu; 2140 ahw->max_mtu = nic_info.max_mtu;
2141 2141
2142 adapter->max_tx_rings = ahw->max_tx_ques;
2143 adapter->max_sds_rings = ahw->max_rx_ques;
2144 /* eSwitch capability indicates vNIC mode. 2142 /* eSwitch capability indicates vNIC mode.
2145 * vNIC and SRIOV are mutually exclusive operational modes. 2143 * vNIC and SRIOV are mutually exclusive operational modes.
2146 * If SR-IOV capability is detected, SR-IOV physical function 2144 * If SR-IOV capability is detected, SR-IOV physical function
@@ -2161,6 +2159,7 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
2161int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) 2159int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2162{ 2160{
2163 struct qlcnic_hardware_context *ahw = adapter->ahw; 2161 struct qlcnic_hardware_context *ahw = adapter->ahw;
2162 u16 max_sds_rings, max_tx_rings;
2164 int ret; 2163 int ret;
2165 2164
2166 ret = qlcnic_83xx_get_nic_configuration(adapter); 2165 ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2173,18 +2172,21 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2173 if (qlcnic_83xx_config_vnic_opmode(adapter)) 2172 if (qlcnic_83xx_config_vnic_opmode(adapter))
2174 return -EIO; 2173 return -EIO;
2175 2174
2176 adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS; 2175 max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
2177 adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS; 2176 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
2178 } else if (ret == QLC_83XX_DEFAULT_OPMODE) { 2177 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
2179 ahw->nic_mode = QLCNIC_DEFAULT_MODE; 2178 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
2180 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; 2179 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
2181 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; 2180 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
2182 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; 2181 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
2183 adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; 2182 max_tx_rings = QLCNIC_MAX_TX_RINGS;
2184 } else { 2183 } else {
2185 return -EIO; 2184 return -EIO;
2186 } 2185 }
2187 2186
2187 adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings);
2188 adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings);
2189
2188 return 0; 2190 return 0;
2189} 2191}
2190 2192
@@ -2348,15 +2350,16 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2348 goto disable_intr; 2350 goto disable_intr;
2349 } 2351 }
2350 2352
2353 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
2354
2351 err = qlcnic_83xx_setup_mbx_intr(adapter); 2355 err = qlcnic_83xx_setup_mbx_intr(adapter);
2352 if (err) 2356 if (err)
2353 goto disable_mbx_intr; 2357 goto disable_mbx_intr;
2354 2358
2355 qlcnic_83xx_clear_function_resources(adapter); 2359 qlcnic_83xx_clear_function_resources(adapter);
2356 2360 qlcnic_dcb_enable(adapter->dcb);
2357 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
2358
2359 qlcnic_83xx_initialize_nic(adapter, 1); 2361 qlcnic_83xx_initialize_nic(adapter, 1);
2362 qlcnic_dcb_get_info(adapter->dcb);
2360 2363
2361 /* Configure default, SR-IOV or Virtual NIC mode of operation */ 2364 /* Configure default, SR-IOV or Virtual NIC mode of operation */
2362 err = qlcnic_83xx_configure_opmode(adapter); 2365 err = qlcnic_83xx_configure_opmode(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 64dcbf33d8f0..c1e11f5715b0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -883,8 +883,6 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
883 npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); 883 npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
884 npar_info->capabilities = le32_to_cpu(nic_info->capabilities); 884 npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
885 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); 885 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
886 adapter->max_tx_rings = npar_info->max_tx_ques;
887 adapter->max_sds_rings = npar_info->max_rx_ques;
888 } 886 }
889 887
890 qlcnic_free_mbx_args(&cmd); 888 qlcnic_free_mbx_args(&cmd);
@@ -1356,6 +1354,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1356 arg2 &= ~BIT_3; 1354 arg2 &= ~BIT_3;
1357 break; 1355 break;
1358 case QLCNIC_ADD_VLAN: 1356 case QLCNIC_ADD_VLAN:
1357 arg1 &= ~(0x0ffff << 16);
1359 arg1 |= (BIT_2 | BIT_5); 1358 arg1 |= (BIT_2 | BIT_5);
1360 arg1 |= (esw_cfg->vlan_id << 16); 1359 arg1 |= (esw_cfg->vlan_id << 16);
1361 break; 1360 break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 7d4f54912bad..a51fe18f09a8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -330,8 +330,6 @@ static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
330 goto out_free_cfg; 330 goto out_free_cfg;
331 } 331 }
332 332
333 qlcnic_dcb_get_info(dcb);
334
335 return 0; 333 return 0;
336out_free_cfg: 334out_free_cfg:
337 kfree(dcb->cfg); 335 kfree(dcb->cfg);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 309d05640883..7e55e88a81bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -670,7 +670,7 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
670 else 670 else
671 num_msix += adapter->drv_tx_rings; 671 num_msix += adapter->drv_tx_rings;
672 672
673 if (adapter->drv_rss_rings > 0) 673 if (adapter->drv_rss_rings > 0)
674 num_msix += adapter->drv_rss_rings; 674 num_msix += adapter->drv_rss_rings;
675 else 675 else
676 num_msix += adapter->drv_sds_rings; 676 num_msix += adapter->drv_sds_rings;
@@ -686,19 +686,15 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
686 return -ENOMEM; 686 return -ENOMEM;
687 } 687 }
688 688
689restore:
690 for (vector = 0; vector < num_msix; vector++) 689 for (vector = 0; vector < num_msix; vector++)
691 adapter->msix_entries[vector].entry = vector; 690 adapter->msix_entries[vector].entry = vector;
692 691
692restore:
693 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); 693 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
694 if (err == 0) { 694 if (err > 0) {
695 adapter->ahw->num_msix = num_msix; 695 if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
696 if (adapter->drv_tss_rings > 0) 696 return -ENOSPC;
697 adapter->drv_tx_rings = adapter->drv_tss_rings;
698 697
699 if (adapter->drv_rss_rings > 0)
700 adapter->drv_sds_rings = adapter->drv_rss_rings;
701 } else {
702 netdev_info(adapter->netdev, 698 netdev_info(adapter->netdev,
703 "Unable to allocate %d MSI-X vectors, Available vectors %d\n", 699 "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
704 num_msix, err); 700 num_msix, err);
@@ -716,12 +712,20 @@ restore:
716 "Restoring %d Tx, %d SDS rings for total %d vectors.\n", 712 "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
717 adapter->drv_tx_rings, adapter->drv_sds_rings, 713 adapter->drv_tx_rings, adapter->drv_sds_rings,
718 num_msix); 714 num_msix);
719 goto restore;
720 715
721 err = -EIO; 716 goto restore;
717 } else if (err < 0) {
718 return err;
722 } 719 }
723 720
724 return err; 721 adapter->ahw->num_msix = num_msix;
722 if (adapter->drv_tss_rings > 0)
723 adapter->drv_tx_rings = adapter->drv_tss_rings;
724
725 if (adapter->drv_rss_rings > 0)
726 adapter->drv_sds_rings = adapter->drv_rss_rings;
727
728 return 0;
725} 729}
726 730
727int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) 731int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
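The restore reflow above leans on the old pci_enable_msix() return convention (this predates pci_enable_msix_range()): 0 means success, a negative value is a hard error, and a positive value is the number of vectors actually available, which the driver uses to shrink its request and jump back to restore. The idiom in isolation, as a sketch with an illustrative helper name:

static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int num_msix)
{
	int err = pci_enable_msix(pdev, entries, num_msix);

	if (err > 0)
		/* Only err vectors are available: retry with fewer. */
		err = pci_enable_msix(pdev, entries, err);

	return err;	/* 0 on success, negative errno on failure */
}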
@@ -2202,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
2202 ahw->max_uc_count = count; 2206 ahw->max_uc_count = count;
2203} 2207}
2204 2208
2209static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
2210 u8 tx_queues, u8 rx_queues)
2211{
2212 struct net_device *netdev = adapter->netdev;
2213 int err = 0;
2214
2215 if (tx_queues) {
2216 err = netif_set_real_num_tx_queues(netdev, tx_queues);
2217 if (err) {
2218 netdev_err(netdev, "failed to set %d Tx queues\n",
2219 tx_queues);
2220 return err;
2221 }
2222 }
2223
2224 if (rx_queues) {
2225 err = netif_set_real_num_rx_queues(netdev, rx_queues);
2226 if (err)
2227 netdev_err(netdev, "failed to set %d Rx queues\n",
2228 rx_queues);
2229 }
2230
2231 return err;
2232}
2233
2205int 2234int
2206qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, 2235qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2207 int pci_using_dac) 2236 int pci_using_dac)
@@ -2265,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2265 netdev->priv_flags |= IFF_UNICAST_FLT; 2294 netdev->priv_flags |= IFF_UNICAST_FLT;
2266 netdev->irq = adapter->msix_entries[0].vector; 2295 netdev->irq = adapter->msix_entries[0].vector;
2267 2296
2268 err = qlcnic_set_real_num_queues(adapter, netdev); 2297 err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
2298 adapter->drv_sds_rings);
2269 if (err) 2299 if (err)
2270 return err; 2300 return err;
2271 2301
@@ -2370,6 +2400,14 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
2370 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); 2400 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
2371} 2401}
2372 2402
2403/* Reset firmware API lock */
2404static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
2405{
2406 qlcnic_api_lock(adapter);
2407 qlcnic_api_unlock(adapter);
2408}
2409
2410
2373static int 2411static int
2374qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2412qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2375{ 2413{
@@ -2472,6 +2510,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2472 if (qlcnic_82xx_check(adapter)) { 2510 if (qlcnic_82xx_check(adapter)) {
2473 qlcnic_check_vf(adapter, ent); 2511 qlcnic_check_vf(adapter, ent);
2474 adapter->portnum = adapter->ahw->pci_func; 2512 adapter->portnum = adapter->ahw->pci_func;
2513 qlcnic_reset_api_lock(adapter);
2475 err = qlcnic_start_firmware(adapter); 2514 err = qlcnic_start_firmware(adapter);
2476 if (err) { 2515 if (err) {
2477 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n" 2516 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
@@ -2528,8 +2567,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2528 goto err_out_free_hw; 2567 goto err_out_free_hw;
2529 } 2568 }
2530 2569
2531 qlcnic_dcb_enable(adapter->dcb);
2532
2533 if (qlcnic_read_mac_addr(adapter)) 2570 if (qlcnic_read_mac_addr(adapter))
2534 dev_warn(&pdev->dev, "failed to read mac addr\n"); 2571 dev_warn(&pdev->dev, "failed to read mac addr\n");
2535 2572
@@ -2549,7 +2586,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2549 "Device does not support MSI interrupts\n"); 2586 "Device does not support MSI interrupts\n");
2550 2587
2551 if (qlcnic_82xx_check(adapter)) { 2588 if (qlcnic_82xx_check(adapter)) {
2589 qlcnic_dcb_enable(adapter->dcb);
2590 qlcnic_dcb_get_info(adapter->dcb);
2552 err = qlcnic_setup_intr(adapter); 2591 err = qlcnic_setup_intr(adapter);
2592
2553 if (err) { 2593 if (err) {
2554 dev_err(&pdev->dev, "Failed to setup interrupt\n"); 2594 dev_err(&pdev->dev, "Failed to setup interrupt\n");
2555 goto err_out_disable_msi; 2595 goto err_out_disable_msi;
@@ -2929,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
2929 tx_ring->tx_stats.xmit_called, 2969 tx_ring->tx_stats.xmit_called,
2930 tx_ring->tx_stats.xmit_on, 2970 tx_ring->tx_stats.xmit_on,
2931 tx_ring->tx_stats.xmit_off); 2971 tx_ring->tx_stats.xmit_off);
2972
2973 if (tx_ring->crb_intr_mask)
2974 netdev_info(netdev, "crb_intr_mask=%d\n",
2975 readl(tx_ring->crb_intr_mask));
2976
2932 netdev_info(netdev, 2977 netdev_info(netdev,
2933 "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", 2978 "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
2934 readl(tx_ring->crb_intr_mask),
2935 readl(tx_ring->crb_cmd_producer), 2979 readl(tx_ring->crb_cmd_producer),
2936 tx_ring->producer, tx_ring->sw_consumer, 2980 tx_ring->producer, tx_ring->sw_consumer,
2937 le32_to_cpu(*(tx_ring->hw_consumer))); 2981 le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3964,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
3964int qlcnic_setup_rings(struct qlcnic_adapter *adapter) 4008int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
3965{ 4009{
3966 struct net_device *netdev = adapter->netdev; 4010 struct net_device *netdev = adapter->netdev;
4011 u8 tx_rings, rx_rings;
3967 int err; 4012 int err;
3968 4013
3969 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) 4014 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3970 return -EBUSY; 4015 return -EBUSY;
3971 4016
4017 tx_rings = adapter->drv_tss_rings;
4018 rx_rings = adapter->drv_rss_rings;
4019
3972 netif_device_detach(netdev); 4020 netif_device_detach(netdev);
4021
4022 err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
4023 if (err)
4024 goto done;
4025
3973 if (netif_running(netdev)) 4026 if (netif_running(netdev))
3974 __qlcnic_down(adapter, netdev); 4027 __qlcnic_down(adapter, netdev);
3975 4028
@@ -3989,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
3989 return err; 4042 return err;
3990 } 4043 }
3991 4044
3992 netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); 4045 /* Check if we need to update real_num_{tx|rx}_queues because
 4046 * qlcnic_setup_intr() may change the Tx/Rx ring counts
4047 */
4048 if ((tx_rings != adapter->drv_tx_rings) ||
4049 (rx_rings != adapter->drv_sds_rings)) {
4050 err = qlcnic_set_real_num_queues(adapter,
4051 adapter->drv_tx_rings,
4052 adapter->drv_sds_rings);
4053 if (err)
4054 goto done;
4055 }
3993 4056
3994 if (qlcnic_83xx_check(adapter)) { 4057 if (qlcnic_83xx_check(adapter)) {
3995 qlcnic_83xx_initialize_nic(adapter, 1); 4058 qlcnic_83xx_initialize_nic(adapter, 1);
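Net effect of the qlcnic_setup_rings() hunks: ring counts are published to the stack while the netdev is detached, and published a second time only if qlcnic_setup_intr() had to trim them to the vectors it obtained. Condensed, illustrative flow (the helper name and the attach-on-error policy are assumptions):

static int example_reshape_rings(struct qlcnic_adapter *adapter,
				 u8 tx_rings, u8 rx_rings)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	netif_device_detach(netdev);

	err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
	if (err)
		goto attach;

	/* ... down the interface, reallocate resources, set up IRQs ... */

	/* Re-publish if interrupt setup trimmed the ring counts. */
	if (tx_rings != adapter->drv_tx_rings ||
	    rx_rings != adapter->drv_sds_rings)
		err = qlcnic_set_real_num_queues(adapter,
						 adapter->drv_tx_rings,
						 adapter->drv_sds_rings);
attach:
	netif_device_attach(netdev);
	return err;
}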
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 0638c1810d54..6afe9c1f5ab9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1370,7 +1370,7 @@ static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1370 1370
1371 rsp = qlcnic_sriov_alloc_bc_trans(&trans); 1371 rsp = qlcnic_sriov_alloc_bc_trans(&trans);
1372 if (rsp) 1372 if (rsp)
1373 return rsp; 1373 goto free_cmd;
1374 1374
1375 rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND); 1375 rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
1376 if (rsp) 1376 if (rsp)
@@ -1425,6 +1425,13 @@ err_out:
1425 1425
1426cleanup_transaction: 1426cleanup_transaction:
1427 qlcnic_sriov_cleanup_transaction(trans); 1427 qlcnic_sriov_cleanup_transaction(trans);
1428
1429free_cmd:
1430 if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
1431 qlcnic_free_mbx_args(cmd);
1432 kfree(cmd);
1433 }
1434
1428 return rsp; 1435 return rsp;
1429} 1436}
1430 1437
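The goto free_cmd fix closes a leak on the earliest error path: a QLC_83XX_MBX_CMD_NO_WAIT command is heap-allocated by the caller and never waited on, so the VF mailbox code owns it and must free it on every exit, not just after a transaction exists. The rule as an illustrative helper:

static void example_free_nowait_cmd(struct qlcnic_cmd_args *cmd)
{
	/* Fire-and-forget commands have no waiter left to clean up. */
	if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
		qlcnic_free_mbx_args(cmd);
		kfree(cmd);
	}
}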
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 14f748cbf0de..280137991544 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -461,6 +461,16 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
461{ 461{
462 struct net_device *netdev = adapter->netdev; 462 struct net_device *netdev = adapter->netdev;
463 463
464 if (pci_vfs_assigned(adapter->pdev)) {
465 netdev_err(adapter->netdev,
466 "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n",
467 adapter->portnum);
468 netdev_info(adapter->netdev,
469 "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n",
470 adapter->portnum);
471 return -EPERM;
472 }
473
464 rtnl_lock(); 474 rtnl_lock();
465 if (netif_running(netdev)) 475 if (netif_running(netdev))
466 __qlcnic_down(adapter, netdev); 476 __qlcnic_down(adapter, netdev);
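The added check uses the generic pci_vfs_assigned() helper; refusing early avoids pci_disable_sriov() pulling a VF out from under a running guest. The guard in isolation (illustrative wrapper):

static int example_can_disable_sriov(struct pci_dev *pdev)
{
	/* VFs passed through to guests would disappear mid-flight. */
	if (pci_vfs_assigned(pdev))
		return -EPERM;

	return 0;
}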
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 448d156c3d08..cd346e27f2e1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -354,7 +354,7 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
354{ 354{
355 int i; 355 int i;
356 356
357 for (i = 0; i < adapter->ahw->max_vnic_func; i++) { 357 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
358 if (adapter->npars[i].pci_func == pci_func) 358 if (adapter->npars[i].pci_func == pci_func)
359 return i; 359 return i;
360 } 360 }
@@ -720,6 +720,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
720 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 720 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
721 struct qlcnic_npar_func_cfg *np_cfg; 721 struct qlcnic_npar_func_cfg *np_cfg;
722 struct qlcnic_info nic_info; 722 struct qlcnic_info nic_info;
723 u8 pci_func;
723 int i, ret; 724 int i, ret;
724 u32 count; 725 u32 count;
725 726
@@ -729,26 +730,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
729 730
730 count = size / sizeof(struct qlcnic_npar_func_cfg); 731 count = size / sizeof(struct qlcnic_npar_func_cfg);
731 for (i = 0; i < adapter->ahw->total_nic_func; i++) { 732 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
732 if (qlcnic_is_valid_nic_func(adapter, i) < 0)
733 continue;
734 if (adapter->npars[i].pci_func >= count) { 733 if (adapter->npars[i].pci_func >= count) {
735 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", 734 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
736 __func__, adapter->ahw->total_nic_func, count); 735 __func__, adapter->ahw->total_nic_func, count);
737 continue; 736 continue;
738 } 737 }
739 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
740 if (ret)
741 return ret;
742 if (!adapter->npars[i].eswitch_status) 738 if (!adapter->npars[i].eswitch_status)
743 continue; 739 continue;
744 np_cfg[i].pci_func = i; 740 pci_func = adapter->npars[i].pci_func;
745 np_cfg[i].op_mode = (u8)nic_info.op_mode; 741 if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
746 np_cfg[i].port_num = nic_info.phys_port; 742 continue;
747 np_cfg[i].fw_capab = nic_info.capabilities; 743 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
748 np_cfg[i].min_bw = nic_info.min_tx_bw; 744 if (ret)
749 np_cfg[i].max_bw = nic_info.max_tx_bw; 745 return ret;
750 np_cfg[i].max_tx_queues = nic_info.max_tx_ques; 746
751 np_cfg[i].max_rx_queues = nic_info.max_rx_ques; 747 np_cfg[pci_func].pci_func = pci_func;
748 np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
749 np_cfg[pci_func].port_num = nic_info.phys_port;
750 np_cfg[pci_func].fw_capab = nic_info.capabilities;
751 np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
752 np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
753 np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
754 np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
752 } 755 }
753 return size; 756 return size;
754} 757}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 6203c7d8550f..45019649bbbd 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -358,6 +358,8 @@ struct sxgbe_core_ops {
358 /* Enable disable checksum offload operations */ 358 /* Enable disable checksum offload operations */
359 void (*enable_rx_csum)(void __iomem *ioaddr); 359 void (*enable_rx_csum)(void __iomem *ioaddr);
360 void (*disable_rx_csum)(void __iomem *ioaddr); 360 void (*disable_rx_csum)(void __iomem *ioaddr);
361 void (*enable_rxqueue)(void __iomem *ioaddr, int queue_num);
362 void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num);
361}; 363};
362 364
363const struct sxgbe_core_ops *sxgbe_get_core_ops(void); 365const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
index c4da7a2b002a..58c35692560e 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -165,6 +165,26 @@ static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
165 writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG); 165 writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
166} 166}
167 167
168static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num)
169{
170 u32 reg_val;
171
172 reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
173 reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
174 reg_val |= SXGBE_CORE_RXQ_ENABLE;
175 writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
176}
177
178static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num)
179{
180 u32 reg_val;
181
182 reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
183 reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
184 reg_val |= SXGBE_CORE_RXQ_DISABLE;
185 writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
186}
187
168static void sxgbe_set_eee_mode(void __iomem *ioaddr) 188static void sxgbe_set_eee_mode(void __iomem *ioaddr)
169{ 189{
170 u32 ctrl; 190 u32 ctrl;
@@ -254,6 +274,8 @@ static const struct sxgbe_core_ops core_ops = {
254 .set_eee_pls = sxgbe_set_eee_pls, 274 .set_eee_pls = sxgbe_set_eee_pls,
255 .enable_rx_csum = sxgbe_enable_rx_csum, 275 .enable_rx_csum = sxgbe_enable_rx_csum,
256 .disable_rx_csum = sxgbe_disable_rx_csum, 276 .disable_rx_csum = sxgbe_disable_rx_csum,
277 .enable_rxqueue = sxgbe_core_enable_rxqueue,
278 .disable_rxqueue = sxgbe_core_disable_rxqueue,
257}; 279};
258 280
259const struct sxgbe_core_ops *sxgbe_get_core_ops(void) 281const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
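The two new core ops are consumed from sxgbe_main.c (see the hunks further below): every RX queue is enabled at open and disabled at remove. A hypothetical consumer built only from names in the patch:

static void example_set_all_rxqueues(struct sxgbe_priv_data *priv, bool on)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		if (on)
			priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
		else
			priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
	}
}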
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
index e896dbbd2e15..2686bb5b6765 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
@@ -45,10 +45,10 @@ static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
45 p->tdes23.tx_rd_des23.first_desc = is_fd; 45 p->tdes23.tx_rd_des23.first_desc = is_fd;
46 p->tdes23.tx_rd_des23.buf1_size = buf1_len; 46 p->tdes23.tx_rd_des23.buf1_size = buf1_len;
47 47
48 p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len; 48 p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len;
49 49
50 if (cksum) 50 if (cksum)
51 p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full; 51 p->tdes23.tx_rd_des23.cksum_ctl = cic_full;
52} 52}
53 53
54/* Set VLAN control information */ 54/* Set VLAN control information */
@@ -233,6 +233,12 @@ static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
233 p->rdes23.rx_rd_des23.own_bit = 1; 233 p->rdes23.rx_rd_des23.own_bit = 1;
234} 234}
235 235
236/* Set Interrupt on completion bit */
237static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p)
238{
239 p->rdes23.rx_rd_des23.int_on_com = 1;
240}
241
236/* Get the receive frame size */ 242/* Get the receive frame size */
237static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p) 243static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
238{ 244{
@@ -498,6 +504,7 @@ static const struct sxgbe_desc_ops desc_ops = {
498 .init_rx_desc = sxgbe_init_rx_desc, 504 .init_rx_desc = sxgbe_init_rx_desc,
499 .get_rx_owner = sxgbe_get_rx_owner, 505 .get_rx_owner = sxgbe_get_rx_owner,
500 .set_rx_owner = sxgbe_set_rx_owner, 506 .set_rx_owner = sxgbe_set_rx_owner,
507 .set_rx_int_on_com = sxgbe_set_rx_int_on_com,
501 .get_rx_frame_len = sxgbe_get_rx_frame_len, 508 .get_rx_frame_len = sxgbe_get_rx_frame_len,
502 .get_rx_fd_status = sxgbe_get_rx_fd_status, 509 .get_rx_fd_status = sxgbe_get_rx_fd_status,
503 .get_rx_ld_status = sxgbe_get_rx_ld_status, 510 .get_rx_ld_status = sxgbe_get_rx_ld_status,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
index 838cb9fb0ea9..18609324db72 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
@@ -39,22 +39,22 @@ struct sxgbe_tx_norm_desc {
39 u32 int_on_com:1; 39 u32 int_on_com:1;
40 /* TDES3 */ 40 /* TDES3 */
41 union { 41 union {
42 u32 tcp_payload_len:18; 42 u16 tcp_payload_len;
43 struct { 43 struct {
44 u32 total_pkt_len:15; 44 u32 total_pkt_len:15;
45 u32 reserved1:1; 45 u32 reserved1:1;
46 u32 cksum_ctl:2; 46 } pkt_len;
47 } cksum_pktlen;
48 } tx_pkt_len; 47 } tx_pkt_len;
49 48
50 u32 tse_bit:1; 49 u16 cksum_ctl:2;
51 u32 tcp_hdr_len:4; 50 u16 tse_bit:1;
52 u32 sa_insert_ctl:3; 51 u16 tcp_hdr_len:4;
53 u32 crc_pad_ctl:2; 52 u16 sa_insert_ctl:3;
54 u32 last_desc:1; 53 u16 crc_pad_ctl:2;
55 u32 first_desc:1; 54 u16 last_desc:1;
56 u32 ctxt_bit:1; 55 u16 first_desc:1;
57 u32 own_bit:1; 56 u16 ctxt_bit:1;
57 u16 own_bit:1;
58 } tx_rd_des23; 58 } tx_rd_des23;
59 59
60 /* tx write back Desc 2,3 */ 60 /* tx write back Desc 2,3 */
@@ -70,25 +70,20 @@ struct sxgbe_tx_norm_desc {
70 70
71struct sxgbe_rx_norm_desc { 71struct sxgbe_rx_norm_desc {
72 union { 72 union {
73 u32 rdes0; /* buf1 address */ 73 u64 rdes01; /* buf1 address */
74 struct { 74 union {
75 u32 out_vlan_tag:16; 75 u32 out_vlan_tag:16;
76 u32 in_vlan_tag:16; 76 u32 in_vlan_tag:16;
77 } wb_rx_des0; 77 u32 rss_hash;
78 } rd_wb_des0; 78 } rx_wb_des01;
79 79 } rdes01;
80 union {
81 u32 rdes1; /* buf2 address or buf1[63:32] */
82 u32 rss_hash; /* Write-back RX */
83 } rd_wb_des1;
84 80
85 union { 81 union {
86 /* RX Read format Desc 2,3 */ 82 /* RX Read format Desc 2,3 */
87 struct{ 83 struct{
88 /* RDES2 */ 84 /* RDES2 */
89 u32 buf2_addr; 85 u64 buf2_addr:62;
90 /* RDES3 */ 86 /* RDES3 */
91 u32 buf2_hi_addr:30;
92 u32 int_on_com:1; 87 u32 int_on_com:1;
93 u32 own_bit:1; 88 u32 own_bit:1;
94 } rx_rd_des23; 89 } rx_rd_des23;
@@ -263,6 +258,9 @@ struct sxgbe_desc_ops {
263 /* Set own bit */ 258 /* Set own bit */
264 void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p); 259 void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
265 260
261 /* Set Interrupt on completion bit */
262 void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p);
263
266 /* Get the receive frame size */ 264 /* Get the receive frame size */
267 int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p); 265 int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
268 266
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
index 4d989ff6c978..bb9b5b8afc5f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
@@ -23,21 +23,8 @@
23/* DMA core initialization */ 23/* DMA core initialization */
24static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map) 24static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
25{ 25{
26 int retry_count = 10;
27 u32 reg_val; 26 u32 reg_val;
28 27
29 /* reset the DMA */
30 writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
31 while (retry_count--) {
32 if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
33 SXGBE_DMA_SOFT_RESET))
34 break;
35 mdelay(10);
36 }
37
38 if (retry_count < 0)
39 return -EBUSY;
40
41 reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG); 28 reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
42 29
43 /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register. 30 /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 27e8c824b204..82a9a983869f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1076,6 +1076,9 @@ static int sxgbe_open(struct net_device *dev)
1076 1076
1077 /* Initialize the MAC Core */ 1077 /* Initialize the MAC Core */
1078 priv->hw->mac->core_init(priv->ioaddr); 1078 priv->hw->mac->core_init(priv->ioaddr);
1079 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
1080 priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
1081 }
1079 1082
1080 /* Request the IRQ lines */ 1083 /* Request the IRQ lines */
1081 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt, 1084 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
@@ -1453,6 +1456,7 @@ static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
1453 /* Added memory barrier for RX descriptor modification */ 1456 /* Added memory barrier for RX descriptor modification */
1454 wmb(); 1457 wmb();
1455 priv->hw->desc->set_rx_owner(p); 1458 priv->hw->desc->set_rx_owner(p);
1459 priv->hw->desc->set_rx_int_on_com(p);
1456 /* Added memory barrier for RX descriptor modification */ 1460 /* Added memory barrier for RX descriptor modification */
1457 wmb(); 1461 wmb();
1458 } 1462 }
@@ -2070,6 +2074,24 @@ static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
2070 return 0; 2074 return 0;
2071} 2075}
2072 2076
2077static int sxgbe_sw_reset(void __iomem *addr)
2078{
2079 int retry_count = 10;
2080
2081 writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
2082 while (retry_count--) {
2083 if (!(readl(addr + SXGBE_DMA_MODE_REG) &
2084 SXGBE_DMA_SOFT_RESET))
2085 break;
2086 mdelay(10);
2087 }
2088
2089 if (retry_count < 0)
2090 return -EBUSY;
2091
2092 return 0;
2093}
2094
2073/** 2095/**
2074 * sxgbe_drv_probe 2096 * sxgbe_drv_probe
2075 * @device: device pointer 2097 * @device: device pointer
@@ -2102,6 +2124,10 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
2102 priv->plat = plat_dat; 2124 priv->plat = plat_dat;
2103 priv->ioaddr = addr; 2125 priv->ioaddr = addr;
2104 2126
2127 ret = sxgbe_sw_reset(priv->ioaddr);
2128 if (ret)
2129 goto error_free_netdev;
2130
2105 /* Verify driver arguments */ 2131 /* Verify driver arguments */
2106 sxgbe_verify_args(); 2132 sxgbe_verify_args();
2107 2133
@@ -2218,9 +2244,14 @@ error_free_netdev:
2218int sxgbe_drv_remove(struct net_device *ndev) 2244int sxgbe_drv_remove(struct net_device *ndev)
2219{ 2245{
2220 struct sxgbe_priv_data *priv = netdev_priv(ndev); 2246 struct sxgbe_priv_data *priv = netdev_priv(ndev);
2247 u8 queue_num;
2221 2248
2222 netdev_info(ndev, "%s: removing driver\n", __func__); 2249 netdev_info(ndev, "%s: removing driver\n", __func__);
2223 2250
2251 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
2252 priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
2253 }
2254
2224 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); 2255 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
2225 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); 2256 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
2226 2257
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
index 01af2cbb479d..43ccb4a6de15 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -27,7 +27,7 @@
27#define SXGBE_SMA_PREAD_CMD 0x02 /* post read increament address */ 27#define SXGBE_SMA_PREAD_CMD 0x02 /* post read increament address */
28#define SXGBE_SMA_READ_CMD 0x03 /* read command */ 28#define SXGBE_SMA_READ_CMD 0x03 /* read command */
29#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */ 29#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */
30#define SXGBE_MII_BUSY 0x00800000 /* mii busy */ 30#define SXGBE_MII_BUSY 0x00400000 /* mii busy */
31 31
32static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data) 32static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
33{ 33{
@@ -147,6 +147,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
147 struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data; 147 struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
148 int err, phy_addr; 148 int err, phy_addr;
149 int *irqlist; 149 int *irqlist;
150 bool phy_found = false;
150 bool act; 151 bool act;
151 152
152 /* allocate the new mdio bus */ 153 /* allocate the new mdio bus */
@@ -162,7 +163,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
162 irqlist = priv->mii_irq; 163 irqlist = priv->mii_irq;
163 164
164 /* assign mii bus fields */ 165 /* assign mii bus fields */
165 mdio_bus->name = "samsxgbe"; 166 mdio_bus->name = "sxgbe";
166 mdio_bus->read = &sxgbe_mdio_read; 167 mdio_bus->read = &sxgbe_mdio_read;
167 mdio_bus->write = &sxgbe_mdio_write; 168 mdio_bus->write = &sxgbe_mdio_write;
168 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x", 169 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -216,13 +217,22 @@ int sxgbe_mdio_register(struct net_device *ndev)
216 netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", 217 netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
217 phy->phy_id, phy_addr, irq_str, 218 phy->phy_id, phy_addr, irq_str,
218 dev_name(&phy->dev), act ? " active" : ""); 219 dev_name(&phy->dev), act ? " active" : "");
220 phy_found = true;
219 } 221 }
220 } 222 }
221 223
224 if (!phy_found) {
225 netdev_err(ndev, "PHY not found\n");
226 goto phyfound_err;
227 }
228
222 priv->mii = mdio_bus; 229 priv->mii = mdio_bus;
223 230
224 return 0; 231 return 0;
225 232
233phyfound_err:
234 err = -ENODEV;
235 mdiobus_unregister(mdio_bus);
226mdiobus_err: 236mdiobus_err:
227 mdiobus_free(mdio_bus); 237 mdiobus_free(mdio_bus);
228 return err; 238 return err;
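Besides failing the probe when no PHY is found, this file corrects the busy flag: SXGBE_MII_BUSY is bit 22 (0x00400000), not bit 23. A minimal busy-wait sketch against the corrected bit; the timeout policy and helper name are illustrative assumptions:

static int example_mdio_busy_wait(void __iomem *ioaddr, unsigned int reg_off)
{
	int timeout = 100;

	/* Poll until the controller clears the busy bit. */
	while (readl(ioaddr + reg_off) & SXGBE_MII_BUSY) {
		if (!timeout--)
			return -EBUSY;
		mdelay(1);
	}

	return 0;
}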
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
index 5a89acb4c505..56f8bf5a3f1b 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -52,6 +52,10 @@
52#define SXGBE_CORE_RX_CTL2_REG 0x00A8 52#define SXGBE_CORE_RX_CTL2_REG 0x00A8
53#define SXGBE_CORE_RX_CTL3_REG 0x00AC 53#define SXGBE_CORE_RX_CTL3_REG 0x00AC
54 54
55#define SXGBE_CORE_RXQ_ENABLE_MASK 0x0003
56#define SXGBE_CORE_RXQ_ENABLE 0x0002
57#define SXGBE_CORE_RXQ_DISABLE 0x0000
58
55/* Interrupt Registers */ 59/* Interrupt Registers */
56#define SXGBE_CORE_INT_STATUS_REG 0x00B0 60#define SXGBE_CORE_INT_STATUS_REG 0x00B0
57#define SXGBE_CORE_INT_ENABLE_REG 0x00B4 61#define SXGBE_CORE_INT_ENABLE_REG 0x00B4
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 21c20ea0dad0..b5ed30a39144 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -738,8 +738,11 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
 	/* If it was a port reset, trigger reallocation of MC resources.
 	 * Note that on an MC reset nothing needs to be done now because we'll
 	 * detect the MC reset later and handle it then.
+	 * For an FLR, we never get an MC reset event, but the MC has reset all
+	 * resources assigned to us, so we have to trigger reallocation now.
 	 */
-	if (reset_type == RESET_TYPE_ALL && !rc)
+	if ((reset_type == RESET_TYPE_ALL ||
+	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
 		efx_ef10_reset_mc_allocations(efx);
 	return rc;
 }
@@ -2141,6 +2144,11 @@ static int efx_ef10_fini_dmaq(struct efx_nic *efx)
 	return 0;
 }
 
+static void efx_ef10_prepare_flr(struct efx_nic *efx)
+{
+	atomic_set(&efx->active_queues, 0);
+}
+
 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
 				  const struct efx_filter_spec *right)
 {
@@ -3603,6 +3611,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.probe_port = efx_mcdi_port_probe,
 	.remove_port = efx_mcdi_port_remove,
 	.fini_dmaq = efx_ef10_fini_dmaq,
+	.prepare_flr = efx_ef10_prepare_flr,
+	.finish_flr = efx_port_dummy_op_void,
 	.describe_stats = efx_ef10_describe_stats,
 	.update_stats = efx_ef10_update_stats,
 	.start_stats = efx_mcdi_mac_start_stats,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 57b971e5e6b2..63d595fd3cc5 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -76,6 +76,7 @@ const char *const efx_reset_type_names[] = {
 	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
 	[RESET_TYPE_WORLD]              = "WORLD",
 	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+	[RESET_TYPE_MC_BIST]            = "MC_BIST",
 	[RESET_TYPE_DISABLE]            = "DISABLE",
 	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
 	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
@@ -83,7 +84,7 @@ const char *const efx_reset_type_names[] = {
 	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
 	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
 	[RESET_TYPE_MC_FAILURE]         = "MC_FAILURE",
-	[RESET_TYPE_MC_BIST]            = "MC_BIST",
+	[RESET_TYPE_MCDI_TIMEOUT]       = "MCDI_TIMEOUT (FLR)",
 };
 
 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -1739,7 +1740,8 @@ static void efx_start_all(struct efx_nic *efx)
 
 	/* Check that it is appropriate to restart the interface. All
 	 * of these flags are safe to read under just the rtnl lock */
-	if (efx->port_enabled || !netif_running(efx->net_dev))
+	if (efx->port_enabled || !netif_running(efx->net_dev) ||
+	    efx->reset_pending)
 		return;
 
 	efx_start_port(efx);
@@ -2334,6 +2336,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
 {
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
+	if (method == RESET_TYPE_MCDI_TIMEOUT)
+		efx->type->prepare_flr(efx);
+
 	efx_stop_all(efx);
 	efx_disable_interrupts(efx);
 
@@ -2354,6 +2359,10 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
+	if (method == RESET_TYPE_MCDI_TIMEOUT)
+		efx->type->finish_flr(efx);
+
+	/* Ensure that SRAM is initialised even if we're disabling the device */
 	rc = efx->type->init(efx);
 	if (rc) {
 		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
@@ -2417,7 +2426,10 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
 	/* Clear flags for the scopes we covered.  We assume the NIC and
 	 * driver are now quiescent so that there is no race here.
 	 */
-	efx->reset_pending &= -(1 << (method + 1));
+	if (method < RESET_TYPE_MAX_METHOD)
+		efx->reset_pending &= -(1 << (method + 1));
+	else /* it doesn't fit into the well-ordered scope hierarchy */
+		__clear_bit(method, &efx->reset_pending);
 
 	/* Reinitialise bus-mastering, which may have been turned off before
 	 * the reset was scheduled. This is still appropriate, even in the
@@ -2546,6 +2558,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 	case RESET_TYPE_DISABLE:
 	case RESET_TYPE_RECOVER_OR_DISABLE:
 	case RESET_TYPE_MC_BIST:
+	case RESET_TYPE_MCDI_TIMEOUT:
 		method = type;
 		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
 			  RESET_TYPE(method));
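The bit trick in efx_reset() above deserves a note: for a well-ordered reset method, -(1 << (method + 1)) is a mask with every bit above 'method' set, so the AND clears all pending reasons the reset just covered; RESET_TYPE_MCDI_TIMEOUT sits above RESET_TYPE_MAX_METHOD, where that mask would wipe unrelated bits, hence the __clear_bit() branch. A minimal standalone C sketch with made-up values, not driver code:

#include <stdio.h>

int main(void)
{
        unsigned long pending = 0x2b;   /* reasons 0, 1, 3 and 5 pending */
        int method = 3;                 /* we just performed method 3 */

        pending &= -(1UL << (method + 1));      /* keeps only bits >= 4 */
        printf("pending after reset: %#lx\n", pending); /* prints 0x20 */
        return 0;
}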
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 75ef7ef6450b..d1dbb5fb31bb 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -143,6 +143,7 @@ enum efx_loopback_mode {
  * @RESET_TYPE_WORLD: Reset as much as possible
  * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
  * unsuccessful.
+ * @RESET_TYPE_MC_BIST: MC entering BIST mode.
  * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
  * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
  * @RESET_TYPE_INT_ERROR: reset due to internal error
@@ -150,14 +151,16 @@ enum efx_loopback_mode {
  * @RESET_TYPE_DMA_ERROR: DMA error
  * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
  * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
+ * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
  */
 enum reset_type {
-	RESET_TYPE_INVISIBLE = 0,
-	RESET_TYPE_RECOVER_OR_ALL = 1,
-	RESET_TYPE_ALL = 2,
-	RESET_TYPE_WORLD = 3,
-	RESET_TYPE_RECOVER_OR_DISABLE = 4,
-	RESET_TYPE_DISABLE = 5,
+	RESET_TYPE_INVISIBLE,
+	RESET_TYPE_RECOVER_OR_ALL,
+	RESET_TYPE_ALL,
+	RESET_TYPE_WORLD,
+	RESET_TYPE_RECOVER_OR_DISABLE,
+	RESET_TYPE_MC_BIST,
+	RESET_TYPE_DISABLE,
 	RESET_TYPE_MAX_METHOD,
 	RESET_TYPE_TX_WATCHDOG,
 	RESET_TYPE_INT_ERROR,
@@ -165,7 +168,13 @@ enum reset_type {
 	RESET_TYPE_DMA_ERROR,
 	RESET_TYPE_TX_SKIP,
 	RESET_TYPE_MC_FAILURE,
-	RESET_TYPE_MC_BIST,
+	/* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
+	 * it doesn't fit the scope hierarchy (not well-ordered by inclusion).
+	 * We encode this by having its enum value be greater than
+	 * RESET_TYPE_MAX_METHOD.  This also prevents issuing it with
+	 * efx_ioctl_reset.
+	 */
+	RESET_TYPE_MCDI_TIMEOUT,
 	RESET_TYPE_MAX,
 };
 
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 8ec20b713cc6..fae25a418647 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2696,6 +2696,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.fini_dmaq = efx_farch_fini_dmaq,
 	.prepare_flush = falcon_prepare_flush,
 	.finish_flush = efx_port_dummy_op_void,
+	.prepare_flr = efx_port_dummy_op_void,
+	.finish_flr = efx_farch_finish_flr,
 	.describe_stats = falcon_describe_nic_stats,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
@@ -2790,6 +2792,8 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.fini_dmaq = efx_farch_fini_dmaq,
 	.prepare_flush = falcon_prepare_flush,
 	.finish_flush = efx_port_dummy_op_void,
+	.prepare_flr = efx_port_dummy_op_void,
+	.finish_flr = efx_farch_finish_flr,
 	.describe_stats = falcon_describe_nic_stats,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index a08761360cdf..0537381cd2f6 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -741,6 +741,28 @@ int efx_farch_fini_dmaq(struct efx_nic *efx)
 	return rc;
 }
 
+/* Reset queue and flush accounting after FLR
+ *
+ * One possible cause of FLR recovery is that DMA may be failing (e.g. if bus
+ * mastering was disabled), in which case we don't receive (RXQ) flush
+ * completion events.  This means that efx->rxq_flush_outstanding remained at 4
+ * after the FLR; also, efx->active_queues was non-zero (as no flush completion
+ * events were received, and we didn't go through efx_check_tx_flush_complete()).
+ * If we don't fix this up, on the next call to efx_realloc_channels() we won't
+ * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
+ * for batched flush requests, and efx->active_queues gets messed up because
+ * we keep incrementing it for the newly initialised queues when it never went
+ * to zero previously.  Then we get a timeout every time we try to restart the
+ * queues, as it doesn't go back to zero when we should be flushing the queues.
+ */
+void efx_farch_finish_flr(struct efx_nic *efx)
+{
+	atomic_set(&efx->rxq_flush_pending, 0);
+	atomic_set(&efx->rxq_flush_outstanding, 0);
+	atomic_set(&efx->active_queues, 0);
+}
+
+
 /**************************************************************************
  *
  * Event queue processing
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 7bd4b14bf3b3..5239cf9bdc56 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -52,12 +52,7 @@ static void efx_mcdi_timeout_async(unsigned long context);
 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
 			       bool *was_attached_out);
 static bool efx_mcdi_poll_once(struct efx_nic *efx);
-
-static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
-{
-	EFX_BUG_ON_PARANOID(!efx->mcdi);
-	return &efx->mcdi->iface;
-}
+static void efx_mcdi_abandon(struct efx_nic *efx);
 
 int efx_mcdi_init(struct efx_nic *efx)
 {
@@ -558,6 +553,8 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
 			rc = 0;
 		}
 
+		efx_mcdi_abandon(efx);
+
 		/* Close the race with efx_mcdi_ev_cpl() executing just too late
 		 * and completing a request we've just cancelled, by ensuring
 		 * that the seqno check therein fails.
@@ -672,6 +669,9 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
 	if (efx->mc_bist_for_other_fn)
 		return -ENETDOWN;
 
+	if (mcdi->mode == MCDI_MODE_FAIL)
+		return -ENETDOWN;
+
 	efx_mcdi_acquire_sync(mcdi);
 	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
 	return 0;
@@ -812,7 +812,11 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
 		return;
 
 	mcdi = efx_mcdi(efx);
-	if (mcdi->mode == MCDI_MODE_POLL)
+	/* If already in polling mode, nothing to do.
+	 * If in fail-fast state, don't switch to polled completion.
+	 * FLR recovery will do that later.
+	 */
+	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
 		return;
 
 	/* We can switch from event completion to polled completion, because
@@ -841,8 +845,8 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
 
 	mcdi = efx_mcdi(efx);
 
-	/* We must be in polling mode so no more requests can be queued */
-	BUG_ON(mcdi->mode != MCDI_MODE_POLL);
+	/* We must be in poll or fail mode so no more requests can be queued */
+	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
 
 	del_timer_sync(&mcdi->async_timer);
 
@@ -875,8 +879,11 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
 		return;
 
 	mcdi = efx_mcdi(efx);
-
-	if (mcdi->mode == MCDI_MODE_EVENTS)
+	/* If already in event completion mode, nothing to do.
+	 * If in fail-fast state, don't switch to event completion.  FLR
+	 * recovery will do that later.
+	 */
+	if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
 		return;
 
 	/* We can't switch from polled to event completion in the middle of a
@@ -966,6 +973,19 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx)
 	spin_unlock(&mcdi->iface_lock);
 }
 
+/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
+ * to recover.
+ */
+static void efx_mcdi_abandon(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
+		return; /* it had already been done */
+	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
+	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
+}
+
 /* Called from falcon_process_eventq for MCDI events */
 void efx_mcdi_process_event(struct efx_channel *channel,
 			    efx_qword_t *event)
@@ -1512,6 +1532,19 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
 {
 	int rc;
 
+	/* If MCDI is down, we can't handle_assertion */
+	if (method == RESET_TYPE_MCDI_TIMEOUT) {
+		rc = pci_reset_function(efx->pci_dev);
+		if (rc)
+			return rc;
+		/* Re-enable polled MCDI completion */
+		if (efx->mcdi) {
+			struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+			mcdi->mode = MCDI_MODE_POLL;
+		}
+		return 0;
+	}
+
 	/* Recover from a failed assertion pre-reset */
 	rc = efx_mcdi_handle_assertion(efx);
 	if (rc)
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 52931aebf3c3..56465f7465a2 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -28,9 +28,16 @@ enum efx_mcdi_state {
 	MCDI_STATE_COMPLETED,
 };
 
+/**
+ * enum efx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_POLL: poll for MCDI completion, until timeout
+ * @MCDI_MODE_EVENTS: wait for an mcdi_event.  On timeout, poll once
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
 enum efx_mcdi_mode {
 	MCDI_MODE_POLL,
 	MCDI_MODE_EVENTS,
+	MCDI_MODE_FAIL,
 };
 
 /**
@@ -104,6 +111,12 @@ struct efx_mcdi_data {
 	u32 fn_flags;
 };
 
+static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
+{
+	EFX_BUG_ON_PARANOID(!efx->mcdi);
+	return &efx->mcdi->iface;
+}
+
 #ifdef CONFIG_SFC_MCDI_MON
 static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
 {
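The new MCDI_MODE_FAIL state is entered exactly once via xchg() in efx_mcdi_abandon(), which both marks the interface dead and guarantees that a single FLR reset is scheduled no matter how many commands time out concurrently. A standalone C11 sketch of that one-shot transition pattern (names are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdio.h>

enum mode { MODE_POLL, MODE_EVENTS, MODE_FAIL };
static _Atomic enum mode mcdi_mode = MODE_EVENTS;

static void abandon(void)
{
        /* atomic_exchange returns the previous mode, so only the first
         * caller that flips it to FAIL performs the recovery action */
        if (atomic_exchange(&mcdi_mode, MODE_FAIL) == MODE_FAIL)
                return;         /* somebody else already did it */
        printf("scheduling FLR recovery\n");    /* runs exactly once */
}

int main(void)
{
        abandon();
        abandon();      /* second call is a no-op */
        return 0;
}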
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 8a400a0595eb..5bdae8ed7c57 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -972,6 +972,8 @@ struct efx_mtd_partition {
  *	(for Falcon architecture)
  * @finish_flush: Clean up after flushing the DMA queues (for Falcon
  *	architecture)
+ * @prepare_flr: Prepare for an FLR
+ * @finish_flr: Clean up after an FLR
  * @describe_stats: Describe statistics for ethtool
  * @update_stats: Update statistics not provided by event handling.
  *	Either argument may be %NULL.
@@ -1100,6 +1102,8 @@ struct efx_nic_type {
 	int (*fini_dmaq)(struct efx_nic *efx);
 	void (*prepare_flush)(struct efx_nic *efx);
 	void (*finish_flush)(struct efx_nic *efx);
+	void (*prepare_flr)(struct efx_nic *efx);
+	void (*finish_flr)(struct efx_nic *efx);
 	size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
 	size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
 			       struct rtnl_link_stats64 *core_stats);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 32d969e857f7..89b83e59e1dc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
 	efx->net_dev->rx_cpu_rmap = NULL;
 #endif
 
-	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel(channel, efx)
-		free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
-	/* Disable legacy interrupt */
-	if (efx->legacy_irq)
+	if (EFX_INT_MODE_USE_MSI(efx)) {
+		/* Disable MSI/MSI-X interrupts */
+		efx_for_each_channel(channel, efx)
+			free_irq(channel->irq,
+				 &efx->msi_context[channel->channel]);
+	} else {
+		/* Disable legacy interrupt */
 		free_irq(efx->legacy_irq, efx);
+	}
 }
 
 /* Register dump */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index a001fae1a8d7..d3ad8ed8d901 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -757,6 +757,7 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
 int efx_nic_flush_queues(struct efx_nic *efx);
 void siena_prepare_flush(struct efx_nic *efx);
 int efx_farch_fini_dmaq(struct efx_nic *efx);
+void efx_farch_finish_flr(struct efx_nic *efx);
 void siena_finish_flush(struct efx_nic *efx);
 void falcon_start_nic_stats(struct efx_nic *efx);
 void falcon_stop_nic_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 23f3a6f7737a..50ffefed492c 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -921,6 +921,8 @@ const struct efx_nic_type siena_a0_nic_type = {
 	.fini_dmaq = efx_farch_fini_dmaq,
 	.prepare_flush = siena_prepare_flush,
 	.finish_flush = siena_finish_flush,
+	.prepare_flr = efx_port_dummy_op_void,
+	.finish_flr = efx_farch_finish_flr,
 	.describe_stats = siena_describe_nic_stats,
 	.update_stats = siena_update_nic_stats,
 	.start_stats = efx_mcdi_mac_start_stats,
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index d1b4dca53a9d..bcaa41af1e62 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -147,18 +147,19 @@ MODULE_ALIAS("platform:smc91x");
  */
 #define MII_DELAY		1
 
-#if SMC_DEBUG > 0
-#define DBG(n, dev, args...)				\
-	do {						\
-		if (SMC_DEBUG >= (n))			\
-			netdev_dbg(dev, args);		\
+#define DBG(n, dev, fmt, ...)					\
+	do {							\
+		if (SMC_DEBUG >= (n))				\
+			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
 	} while (0)
 
-#define PRINTK(dev, args...)   netdev_info(dev, args)
-#else
-#define DBG(n, dev, args...)   do { } while (0)
-#define PRINTK(dev, args...)   netdev_dbg(dev, args)
-#endif
+#define PRINTK(dev, fmt, ...)					\
+	do {							\
+		if (SMC_DEBUG > 0)				\
+			netdev_info(dev, fmt, ##__VA_ARGS__);	\
+		else						\
+			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
+	} while (0)
 
 #if SMC_DEBUG > 3
 static void PRINT_PKT(u_char *buf, int length)
@@ -191,7 +192,7 @@ static void PRINT_PKT(u_char *buf, int length)
 	pr_cont("\n");
 }
 #else
-#define PRINT_PKT(x...)  do { } while (0)
+static inline void PRINT_PKT(u_char *buf, int length) { }
 #endif
 
 
@@ -1781,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp)
 	int timeout = 20;
 	unsigned long cookie;
 
-	DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
+	DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__);
 
 	cookie = probe_irq_on();
 
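The smc91x change above replaces the #if SMC_DEBUG pair of macro definitions with a single definition guarded by a constant if, so the printout arguments are always parsed and type-checked even when the debug level compiles them out (which is also how the stale DBG(2, dev, ...) argument was caught and fixed to lp->dev). A standalone sketch of the pattern, with fprintf standing in for netdev_dbg:

#include <stdio.h>

#define SMC_DEBUG 0

#define DBG(n, fmt, ...)                                        \
        do {                                                    \
                if (SMC_DEBUG >= (n))                           \
                        fprintf(stderr, fmt, ##__VA_ARGS__);    \
        } while (0)

int main(void)
{
        /* the branch is dead code at SMC_DEBUG 0 and the compiler drops
         * it, but the format string and arguments are still checked */
        DBG(2, "irq cookie %lu\n", 42UL);
        return 0;
}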
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d940034acdd4..0f4841d2e8dc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
 		if (ret) {
 			pr_err("%s: Cannot attach to PHY (error: %d)\n",
 			       __func__, ret);
-			goto phy_error;
+			return ret;
 		}
 	}
 
@@ -1779,8 +1779,6 @@ init_error:
 dma_desc_error:
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
-phy_error:
-	clk_disable_unprepare(priv->stmmac_clk);
 
 	return ret;
 }
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index df8d383acf48..b9ac20f42651 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
 	int i;
 
 	for (i = 0; i < N_TX_RINGS; i++)
-		spin_lock(&cp->tx_lock[i]);
+		spin_lock_nested(&cp->tx_lock[i], i);
 }
 
 static inline void cas_lock_all(struct cas *cp)
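spin_lock_nested(&cp->tx_lock[i], i) does not change locking behaviour; it gives each TX-ring lock a distinct lockdep subclass so that taking all N same-class locks in a loop is not reported as recursive locking. The actual correctness requirement remains a fixed acquisition order, as in this standalone pthreads sketch (an analogy, not kernel code):

#include <pthread.h>

#define N_TX_RINGS 4
static pthread_mutex_t tx_lock[N_TX_RINGS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void lock_all_tx(void)
{
        for (int i = 0; i < N_TX_RINGS; i++)    /* always ascending order */
                pthread_mutex_lock(&tx_lock[i]);
}

static void unlock_all_tx(void)
{
        for (int i = N_TX_RINGS - 1; i >= 0; i--)
                pthread_mutex_unlock(&tx_lock[i]);
}

int main(void)
{
        lock_all_tx();          /* no deadlock: every path uses one order */
        unlock_all_tx();
        return 0;
}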
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 36aa109416c4..c331b7ebc812 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 	mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
 	phyid = be32_to_cpup(parp+1);
 	mdio = of_find_device_by_node(mdio_node);
-
-	if (strncmp(mdio->name, "gpio", 4) == 0) {
-		/* GPIO bitbang MDIO driver attached */
-		struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
-		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-			 PHY_ID_FMT, bus->id, phyid);
-	} else {
-		/* davinci MDIO driver attached */
-		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-			 PHY_ID_FMT, mdio->name, phyid);
+	of_node_put(mdio_node);
+	if (!mdio) {
+		pr_err("Missing mdio platform device\n");
+		return -EINVAL;
 	}
+	snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+		 PHY_ID_FMT, mdio->name, phyid);
 
 	mac_addr = of_get_mac_address(slave_node);
 	if (mac_addr)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 31e55fba7cad..7918d5132c1f 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -382,6 +382,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	if (skb_is_gso(skb))
 		goto do_lso;
 
+	if ((skb->ip_summed == CHECKSUM_NONE) ||
+	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
+		goto do_send;
+
 	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
 	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
 			    TCPIP_CHKSUM_PKTINFO);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 430bb0db9bc4..e36f194673a4 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -365,7 +365,7 @@ __at86rf230_read_subreg(struct at86rf230_local *lp,
 	dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
 
 	if (status == 0)
-		*data = buf[1];
+		*data = (buf[1] & mask) >> shift;
 
 	return status;
 }
@@ -1025,14 +1025,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
 		return -EINVAL;
 	}
 
-	rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
-	if (rc)
-		return rc;
-	if (!status) {
-		dev_err(&lp->spi->dev, "AVDD error\n");
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
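The one-line subregister fix above is the whole story: a sub-field read must apply the field mask and shift, otherwise neighbouring bits of the raw register leak into the returned value. A standalone sketch with made-up register values:

#include <stdio.h>

static unsigned char read_subreg(unsigned char raw,
                                 unsigned char mask, int shift)
{
        return (raw & mask) >> shift;   /* isolate the field, then align */
}

int main(void)
{
        /* e.g. a 2-bit field at bits 5..6 of a status register */
        unsigned char raw = 0x6f, mask = 0x60;

        printf("field = %u\n", read_subreg(raw, mask, 5)); /* prints 3 */
        return 0;
}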
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 753a8c23d15d..d53e299ae1d9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -263,11 +263,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 	const struct macvlan_dev *vlan = netdev_priv(dev);
 	const struct macvlan_port *port = vlan->port;
 	const struct macvlan_dev *dest;
-	__u8 ip_summed = skb->ip_summed;
 
 	if (vlan->mode == MACVLAN_MODE_BRIDGE) {
 		const struct ethhdr *eth = (void *)skb->data;
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		/* send to other bridge ports directly */
 		if (is_multicast_ether_addr(eth->h_dest)) {
@@ -285,7 +283,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 xmit_world:
-	skb->ip_summed = ip_summed;
 	skb->dev = vlan->lowerdev;
 	return dev_queue_xmit(skb);
 }
@@ -461,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
 
-	if (change & IFF_ALLMULTI)
-		dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+	if (dev->flags & IFF_UP) {
+		if (change & IFF_ALLMULTI)
+			dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+	}
 }
 
 static void macvlan_set_mac_lists(struct net_device *dev)
@@ -518,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
 	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+	return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
 static void macvlan_set_lockdep_class_one(struct net_device *dev,
 					  struct netdev_queue *txq,
 					  void *_unused)
@@ -528,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
 
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-	lockdep_set_class(&dev->addr_list_lock,
-			  &macvlan_netdev_addr_lock_key);
+	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+				       &macvlan_netdev_addr_lock_key,
+				       macvlan_get_nest_level(dev));
 	netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }
 
@@ -724,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
 	.ndo_fdb_add		= macvlan_fdb_add,
 	.ndo_fdb_del		= macvlan_fdb_del,
 	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
+	.ndo_get_lock_subclass  = macvlan_get_nest_level,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -852,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	vlan->dev      = dev;
 	vlan->port     = port;
 	vlan->set_features = MACVLAN_FEATURES;
+	vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
 
 	vlan->mode     = MACVLAN_MODE_VEPA;
 	if (data && data[IFLA_MACVLAN_MODE])
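The nest_level added above records how many macvlan layers sit below a device (one more than its lower device's level), and that depth becomes the lockdep subclass for the address-list lock, so stacked macvlans no longer trigger false recursion warnings. A simplified standalone sketch of the depth bookkeeping (types are illustrative, not the kernel's):

#include <stdio.h>

struct dev {
        struct dev *lower;      /* NULL for the physical device */
        int nest_level;
};

static int get_nest_level(const struct dev *lower)
{
        return lower ? lower->nest_level : 0;
}

int main(void)
{
        struct dev eth0     = { .lower = NULL, .nest_level = 0 };
        struct dev macvlan0 = { .lower = &eth0 };
        struct dev macvlan1 = { .lower = &macvlan0 };

        macvlan0.nest_level = get_nest_level(macvlan0.lower) + 1;
        macvlan1.nest_level = get_nest_level(macvlan1.lower) + 1;
        printf("macvlan1 nest level = %d\n", macvlan1.nest_level); /* 2 */
        return 0;
}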
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index ff111a89e17f..3381c4f91a8c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -322,6 +322,15 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
 			segs = nskb;
 		}
 	} else {
+		/* If we receive a partial checksum and the tap side
+		 * doesn't support checksum offload, compute the checksum.
+		 * Note: it doesn't matter which checksum feature to
+		 * check, we either support them all or none.
+		 */
+		if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		    !(features & NETIF_F_ALL_CSUM) &&
+		    skb_checksum_help(skb))
+			goto drop;
 		skb_queue_tail(&q->sk.sk_receive_queue, skb);
 	}
 
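The macvtap hunk falls back to software checksumming when the tap consumer offers no offload features; skb_checksum_help() fills in the missing transport checksum before the packet is queued. For reference, a standalone sketch of the RFC 1071 ones'-complement sum that such software checksumming is built on (illustrative, not the kernel's implementation):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t csum(const uint8_t *data, size_t len)
{
        uint32_t sum = 0;

        for (size_t i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)data[i] << 8 | data[i + 1];
        if (len & 1)                    /* odd trailing byte */
                sum += (uint32_t)data[len - 1] << 8;
        while (sum >> 16)               /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        const uint8_t payload[] = { 0x45, 0x00, 0x00, 0x54 };

        printf("checksum = 0x%04x\n", csum(payload, sizeof(payload)));
        return 0;
}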
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index e701433bf52f..5f1a2250018f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -32,29 +32,39 @@
 
 struct mdio_gpio_info {
 	struct mdiobb_ctrl ctrl;
-	int mdc, mdio;
+	int mdc, mdio, mdo;
+	int mdc_active_low, mdio_active_low, mdo_active_low;
 };
 
 static void *mdio_gpio_of_get_data(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct mdio_gpio_platform_data *pdata;
+	enum of_gpio_flags flags;
 	int ret;
 
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return NULL;
 
-	ret = of_get_gpio(np, 0);
+	ret = of_get_gpio_flags(np, 0, &flags);
 	if (ret < 0)
 		return NULL;
 
 	pdata->mdc = ret;
+	pdata->mdc_active_low = flags & OF_GPIO_ACTIVE_LOW;
 
-	ret = of_get_gpio(np, 1);
+	ret = of_get_gpio_flags(np, 1, &flags);
 	if (ret < 0)
 		return NULL;
 	pdata->mdio = ret;
+	pdata->mdio_active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+	ret = of_get_gpio_flags(np, 2, &flags);
+	if (ret > 0) {
+		pdata->mdo = ret;
+		pdata->mdo_active_low = flags & OF_GPIO_ACTIVE_LOW;
+	}
 
 	return pdata;
 }
@@ -64,8 +74,19 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
 	struct mdio_gpio_info *bitbang =
 		container_of(ctrl, struct mdio_gpio_info, ctrl);
 
+	if (bitbang->mdo) {
+		/* Separate output pin. Always set its value to high
+		 * when changing direction. If direction is input,
+		 * assume the pin serves as pull-up. If direction is
+		 * output, the default value is high.
+		 */
+		gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low);
+		return;
+	}
+
 	if (dir)
-		gpio_direction_output(bitbang->mdio, 1);
+		gpio_direction_output(bitbang->mdio,
+				      1 ^ bitbang->mdio_active_low);
 	else
 		gpio_direction_input(bitbang->mdio);
 }
@@ -75,7 +96,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
 	struct mdio_gpio_info *bitbang =
 		container_of(ctrl, struct mdio_gpio_info, ctrl);
 
-	return gpio_get_value(bitbang->mdio);
+	return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low;
 }
 
 static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -83,7 +104,10 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
 	struct mdio_gpio_info *bitbang =
 		container_of(ctrl, struct mdio_gpio_info, ctrl);
 
-	gpio_set_value(bitbang->mdio, what);
+	if (bitbang->mdo)
+		gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low);
+	else
+		gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low);
 }
 
 static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -91,7 +115,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
 	struct mdio_gpio_info *bitbang =
 		container_of(ctrl, struct mdio_gpio_info, ctrl);
 
-	gpio_set_value(bitbang->mdc, what);
+	gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low);
 }
 
 static struct mdiobb_ops mdio_gpio_ops = {
@@ -110,18 +134,22 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
 	struct mdio_gpio_info *bitbang;
 	int i;
 
-	bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL);
+	bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL);
 	if (!bitbang)
 		goto out;
 
 	bitbang->ctrl.ops = &mdio_gpio_ops;
 	bitbang->ctrl.reset = pdata->reset;
 	bitbang->mdc = pdata->mdc;
+	bitbang->mdc_active_low = pdata->mdc_active_low;
 	bitbang->mdio = pdata->mdio;
+	bitbang->mdio_active_low = pdata->mdio_active_low;
+	bitbang->mdo = pdata->mdo;
+	bitbang->mdo_active_low = pdata->mdo_active_low;
 
 	new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
 	if (!new_bus)
-		goto out_free_bitbang;
+		goto out;
 
 	new_bus->name = "GPIO Bitbanged MDIO",
 
@@ -138,11 +166,18 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
 
 	snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
 
-	if (gpio_request(bitbang->mdc, "mdc"))
+	if (devm_gpio_request(dev, bitbang->mdc, "mdc"))
+		goto out_free_bus;
+
+	if (devm_gpio_request(dev, bitbang->mdio, "mdio"))
 		goto out_free_bus;
 
-	if (gpio_request(bitbang->mdio, "mdio"))
-		goto out_free_mdc;
+	if (bitbang->mdo) {
+		if (devm_gpio_request(dev, bitbang->mdo, "mdo"))
+			goto out_free_bus;
+		gpio_direction_output(bitbang->mdo, 1);
+		gpio_direction_input(bitbang->mdio);
+	}
 
 	gpio_direction_output(bitbang->mdc, 0);
 
@@ -150,12 +185,8 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
 
 	return new_bus;
 
-out_free_mdc:
-	gpio_free(bitbang->mdc);
 out_free_bus:
 	free_mdio_bitbang(new_bus);
-out_free_bitbang:
-	kfree(bitbang);
 out:
 	return NULL;
 }
@@ -163,13 +194,8 @@ out:
 static void mdio_gpio_bus_deinit(struct device *dev)
 {
 	struct mii_bus *bus = dev_get_drvdata(dev);
-	struct mdio_gpio_info *bitbang = bus->priv;
 
-	dev_set_drvdata(dev, NULL);
-	gpio_free(bitbang->mdio);
-	gpio_free(bitbang->mdc);
 	free_mdio_bitbang(bus);
-	kfree(bitbang);
 }
 
 static void mdio_gpio_bus_destroy(struct device *dev)
@@ -189,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
 	if (pdev->dev.of_node) {
 		pdata = mdio_gpio_of_get_data(pdev);
 		bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+		if (bus_id < 0) {
+			dev_warn(&pdev->dev, "failed to get alias id\n");
+			bus_id = 0;
+		}
 	} else {
 		pdata = dev_get_platdata(&pdev->dev);
 		bus_id = pdev->id;
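Most of the mdio-gpio patch reduces to one idiom: XOR the logical level with the pin's active-low flag, so a single code path drives either polarity. A standalone sketch:

#include <stdio.h>

static void gpio_set(int pin, int physical)
{
        printf("pin %d -> %d\n", pin, physical);
}

/* active_low flips the physical level exactly when the flag is set */
static void mdc_set(int pin, int active_low, int logical)
{
        gpio_set(pin, logical ^ active_low);
}

int main(void)
{
        mdc_set(17, 0, 1);      /* active high: drives 1 */
        mdc_set(17, 1, 1);      /* active low:  drives 0 */
        return 0;
}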
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5ad971a55c5d..d849684231c1 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -246,13 +246,13 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
 	if (val1 != -1)
 		newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);
 
-	if (val2 != -1)
+	if (val2 != -2)
 		newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);
 
-	if (val3 != -1)
+	if (val3 != -3)
 		newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);
 
-	if (val4 != -1)
+	if (val4 != -4)
 		newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);
 
 	return kszphy_extended_write(phydev, reg, newval);
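The micrel fix is subtle: the four optional device-tree values are initialised to distinct sentinels (-1, -2, -3, -4), so each "was it provided?" test must compare against its own sentinel; comparing them all against -1 made absent values look present. A standalone sketch of the bug class (names illustrative):

#include <stdio.h>

int main(void)
{
        int val1 = -1, val2 = -2, val3 = -3, val4 = -4; /* defaults */
        int provided = 0;

        val3 = 900;             /* only the third property is present */
        provided += (val1 != -1);
        provided += (val2 != -2);       /* "!= -1" here would be wrong */
        provided += (val3 != -3);
        provided += (val4 != -4);
        printf("%d value(s) provided\n", provided);     /* prints 1 */
        return 0;
}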
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1b6d09aef427..3bc079a67a3d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct phy_device *phydev =
 			container_of(dwork, struct phy_device, state_queue);
-	int needs_aneg = 0, do_suspend = 0;
+	bool needs_aneg = false, do_suspend = false, do_resume = false;
 	int err = 0;
 
 	mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
 	case PHY_PENDING:
 		break;
 	case PHY_UP:
-		needs_aneg = 1;
+		needs_aneg = true;
 
 		phydev->link_timeout = PHY_AN_TIMEOUT;
 
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
 			phydev->adjust_link(phydev->attached_dev);
 
 		} else if (0 == phydev->link_timeout--)
-			needs_aneg = 1;
+			needs_aneg = true;
 		break;
 	case PHY_NOLINK:
 		err = phy_read_status(phydev);
@@ -765,6 +765,17 @@ void phy_state_machine(struct work_struct *work)
 			break;
 
 		if (phydev->link) {
+			if (AUTONEG_ENABLE == phydev->autoneg) {
+				err = phy_aneg_done(phydev);
+				if (err < 0)
+					break;
+
+				if (!err) {
+					phydev->state = PHY_AN;
+					phydev->link_timeout = PHY_AN_TIMEOUT;
+					break;
+				}
+			}
 			phydev->state = PHY_RUNNING;
 			netif_carrier_on(phydev->attached_dev);
 			phydev->adjust_link(phydev->attached_dev);
@@ -780,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
 				netif_carrier_on(phydev->attached_dev);
 			} else {
 				if (0 == phydev->link_timeout--)
-					needs_aneg = 1;
+					needs_aneg = true;
 			}
 
 			phydev->adjust_link(phydev->attached_dev);
@@ -816,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
 			phydev->link = 0;
 			netif_carrier_off(phydev->attached_dev);
 			phydev->adjust_link(phydev->attached_dev);
-			do_suspend = 1;
+			do_suspend = true;
 		}
 		break;
 	case PHY_RESUMING:
@@ -865,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
 			}
 			phydev->adjust_link(phydev->attached_dev);
 		}
+		do_resume = true;
 		break;
 	}
 
@@ -872,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
 
 	if (needs_aneg)
 		err = phy_start_aneg(phydev);
-
-	if (do_suspend)
+	else if (do_suspend)
 		phy_suspend(phydev);
+	else if (do_resume)
+		phy_resume(phydev);
 
 	if (err < 0)
 		phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0ce606624296..4987a1c6dc52 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 	err = phy_init_hw(phydev);
 	if (err)
 		phy_detach(phydev);
-
-	phy_resume(phydev);
+	else
+		phy_resume(phydev);
 
 	return err;
 }
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cc70ecfc7062..ad4a94e9ff57 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -429,13 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
 	if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
 		return;
 
-	spin_lock(&sl->lock);
+	spin_lock_bh(&sl->lock);
 	if (sl->xleft <= 0) {
 		/* Now serial buffer is almost free & we can start
 		 * transmission of another packet */
 		sl->dev->stats.tx_packets++;
 		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-		spin_unlock(&sl->lock);
+		spin_unlock_bh(&sl->lock);
 		sl_unlock(sl);
 		return;
 	}
@@ -443,7 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
 	actual = tty->ops->write(tty, sl->xhead, sl->xleft);
 	sl->xleft -= actual;
 	sl->xhead += actual;
-	spin_unlock(&sl->lock);
+	spin_unlock_bh(&sl->lock);
 }
 
 static void sl_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 33008c1d1d67..767fe61b5ac9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2834,8 +2834,10 @@ static int team_device_event(struct notifier_block *unused,
 	case NETDEV_UP:
 		if (netif_carrier_ok(dev))
 			team_port_change_check(port, true);
+		break;
 	case NETDEV_DOWN:
 		team_port_change_check(port, false);
+		break;
 	case NETDEV_CHANGE:
 		if (netif_running(port->dev))
 			team_port_change_check(port,
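The team hunk adds two missing break statements: without them, a NETDEV_UP event fell through into the NETDEV_DOWN case and immediately reported the port down again. A standalone sketch of the corrected switch (compiling the buggy variant with -Wimplicit-fallthrough makes modern compilers flag it):

#include <stdio.h>

enum event { EV_UP, EV_DOWN };

static void handle(enum event ev)
{
        switch (ev) {
        case EV_UP:
                printf("port up\n");
                break;          /* the statement the old code was missing */
        case EV_DOWN:
                printf("port down\n");
                break;
        }
}

int main(void)
{
        handle(EV_UP);  /* prints only "port up" */
        return 0;
}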
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c9f3281506af..2e025ddcef21 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
 	cdc_ncm_unbind(dev, intf);
 }
 
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+	switch (proto) {
+	case htons(ETH_P_IP):
+	case htons(ETH_P_IPV6):
+		return true;
+	}
+	return false;
+}
 
 static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 	struct cdc_ncm_ctx *ctx = info->ctx;
 	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
 	u16 tci = 0;
+	bool is_ip;
 	u8 *c;
 
 	if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 		if (skb->len <= ETH_HLEN)
 			goto error;
 
+		/* Some applications using e.g. packet sockets will
+		 * bypass the VLAN acceleration and create tagged
+		 * ethernet frames directly.  We primarily look for
+		 * the accelerated out-of-band tag, but fall back if
+		 * required
+		 */
+		skb_reset_mac_header(skb);
+		if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+		    __vlan_get_tag(skb, &tci) == 0) {
+			is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+			skb_pull(skb, VLAN_ETH_HLEN);
+		} else {
+			is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+			skb_pull(skb, ETH_HLEN);
+		}
+
 		/* mapping VLANs to MBIM sessions:
 		 *   no tag     => IPS session <0>
 		 *   1 - 255    => IPS session <vlanid>
 		 *   256 - 511  => DSS session <vlanid - 256>
 		 *   512 - 4095 => unsupported, drop
 		 */
-		vlan_get_tag(skb, &tci);
-
 		switch (tci & 0x0f00) {
 		case 0x0000: /* VLAN ID 0 - 255 */
-			/* verify that datagram is IPv4 or IPv6 */
-			skb_reset_mac_header(skb);
-			switch (eth_hdr(skb)->h_proto) {
-			case htons(ETH_P_IP):
-			case htons(ETH_P_IPV6):
-				break;
-			default:
+			if (!is_ip)
 				goto error;
-			}
 			c = (u8 *)&sign;
 			c[3] = tci;
 			break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 				  "unsupported tci=0x%04x\n", tci);
 			goto error;
 		}
-		skb_pull(skb, ETH_HLEN);
 	}
 
 	spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
 		return;
 
 	/* need to send the NA on the VLAN dev, if any */
-	if (tci)
+	rcu_read_lock();
+	if (tci) {
 		netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
 					      tci);
-	else
+		if (!netdev) {
+			rcu_read_unlock();
+			return;
+		}
+	} else {
 		netdev = dev->net;
-	if (!netdev)
-		return;
+	}
+	dev_hold(netdev);
+	rcu_read_unlock();
 
 	in6_dev = in6_dev_get(netdev);
 	if (!in6_dev)
-		return;
+		goto out;
 	is_router = !!in6_dev->cnf.forwarding;
 	in6_dev_put(in6_dev);
 
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
 				 true /* solicited */,
 				 false /* override */,
 				 true /* inc_opt */);
+out:
+	dev_put(netdev);
 }
 
 static bool is_neigh_solicit(u8 *buf, size_t len)
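The cdc_mbim mapping comment is worth unpacking: the upper nibble of the 12-bit VLAN ID selects the MBIM session type and the low byte selects the session number. A standalone sketch of that dispatch (illustrative only, not the driver's framing code):

#include <stdio.h>

static void map_tci(unsigned int tci)
{
        switch (tci & 0x0f00) {
        case 0x0000:            /* VLAN 0-255: IPS session <vlanid> */
                printf("IPS session %u\n", tci);
                break;
        case 0x0100:            /* VLAN 256-511: DSS session <vlanid - 256> */
                printf("DSS session %u\n", tci - 256);
                break;
        default:                /* VLAN 512-4095: unsupported */
                printf("drop\n");
                break;
        }
}

int main(void)
{
        map_tci(0);     /* untagged: IPS session 0 */
        map_tci(42);    /* IPS session 42 */
        map_tci(300);   /* DSS session 44 */
        map_tci(1000);  /* drop */
        return 0;
}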
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 549dbac710ed..9a2bd11943eb 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -785,7 +785,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 	    skb_out->len > CDC_NCM_MIN_TX_PKT)
 		memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
 		       ctx->tx_max - skb_out->len);
-	else if ((skb_out->len % dev->maxpacket) == 0)
+	else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
 		*skb_put(skb_out, 1) = 0;	/* force short packet */
 
 	/* set final frame length */
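The cdc_ncm one-liner guards the short-packet pad byte: a bulk transfer whose length is an exact multiple of the endpoint packet size needs a trailing short packet to terminate, but a frame already at the negotiated maximum (tx_max) must not grow past it. A standalone sketch of the length rule (illustrative parameters):

#include <stdio.h>

static unsigned int finalize_len(unsigned int len, unsigned int maxpacket,
                                 unsigned int tx_max)
{
        /* only pad when the size would otherwise be ambiguous on the wire */
        if (len < tx_max && len % maxpacket == 0)
                len += 1;       /* force short packet */
        return len;
}

int main(void)
{
        printf("%u\n", finalize_len(1024, 512, 16384));  /* 1025: padded */
        printf("%u\n", finalize_len(16384, 512, 16384)); /* 16384: left alone */
        return 0;
}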
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e3458e3c44f1..83208d4fdc59 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -669,6 +669,22 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
+	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
+	{QMI_FIXED_INTF(0x16d8, 0x6007, 0)},	/* CMOTech CHE-628S */
+	{QMI_FIXED_INTF(0x16d8, 0x6008, 0)},	/* CMOTech CMU-301 */
+	{QMI_FIXED_INTF(0x16d8, 0x6280, 0)},	/* CMOTech CHU-628 */
+	{QMI_FIXED_INTF(0x16d8, 0x7001, 0)},	/* CMOTech CHU-720S */
+	{QMI_FIXED_INTF(0x16d8, 0x7002, 0)},	/* CMOTech 7002 */
+	{QMI_FIXED_INTF(0x16d8, 0x7003, 4)},	/* CMOTech CHU-629K */
+	{QMI_FIXED_INTF(0x16d8, 0x7004, 3)},	/* CMOTech 7004 */
+	{QMI_FIXED_INTF(0x16d8, 0x7006, 5)},	/* CMOTech CGU-629 */
+	{QMI_FIXED_INTF(0x16d8, 0x700a, 4)},	/* CMOTech CHU-629S */
+	{QMI_FIXED_INTF(0x16d8, 0x7211, 0)},	/* CMOTech CHU-720I */
+	{QMI_FIXED_INTF(0x16d8, 0x7212, 0)},	/* CMOTech 7212 */
+	{QMI_FIXED_INTF(0x16d8, 0x7213, 0)},	/* CMOTech 7213 */
+	{QMI_FIXED_INTF(0x16d8, 0x7251, 1)},	/* CMOTech 7251 */
+	{QMI_FIXED_INTF(0x16d8, 0x7252, 1)},	/* CMOTech 7252 */
+	{QMI_FIXED_INTF(0x16d8, 0x7253, 1)},	/* CMOTech 7253 */
 	{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
 	{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
 	{QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
@@ -730,16 +746,28 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},	/* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */
+	{QMI_FIXED_INTF(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC73xx */
+	{QMI_FIXED_INTF(0x1199, 0x68c0, 10)},	/* Sierra Wireless MC73xx */
+	{QMI_FIXED_INTF(0x1199, 0x68c0, 11)},	/* Sierra Wireless MC73xx */
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},	/* Sierra Wireless EM7700 */
+	{QMI_FIXED_INTF(0x1199, 0x901f, 8)},	/* Sierra Wireless EM7355 */
+	{QMI_FIXED_INTF(0x1199, 0x9041, 8)},	/* Sierra Wireless MC7305/MC7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9051, 8)},	/* Netgear AirCard 340U */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
 	{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},	/* Olivetti Olicard 200 */
+	{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},	/* Olivetti Olicard 500 */
 	{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},	/* Cinterion PLxx */
 	{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},	/* Cinterion PHxx,PXxx */
+	{QMI_FIXED_INTF(0x413c, 0x81a2, 8)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x413c, 0x81a3, 8)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x413c, 0x81a4, 8)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x413c, 0x81a8, 8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7b687469199b..8a852b5f215f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1285,7 +1285,7 @@ static int virtnet_set_channels(struct net_device *dev,
 	if (channels->rx_count || channels->tx_count || channels->other_count)
 		return -EINVAL;
 
-	if (queue_pairs > vi->max_queue_pairs)
+	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
 		return -EINVAL;
 
 	get_online_cpus();
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c55e316373a1..4dbb2ed85b97 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -389,8 +389,8 @@ static inline size_t vxlan_nlmsg_size(void)
 		+ nla_total_size(sizeof(struct nda_cacheinfo));
 }
 
-static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
-			     struct vxlan_fdb *fdb, int type)
+static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+			     struct vxlan_rdst *rd, int type)
 {
 	struct net *net = dev_net(vxlan->dev);
 	struct sk_buff *skb;
@@ -400,8 +400,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
 	if (skb == NULL)
 		goto errout;
 
-	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
-			     first_remote_rtnl(fdb));
+	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
@@ -427,10 +426,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
 		.remote_vni = VXLAN_N_VID,
 	};
 
-	INIT_LIST_HEAD(&f.remotes);
-	list_add_rcu(&remote.list, &f.remotes);
-
-	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
 }
 
 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -438,11 +434,11 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
 	struct vxlan_fdb f = {
 		.state = NUD_STALE,
 	};
+	struct vxlan_rdst remote = { };
 
-	INIT_LIST_HEAD(&f.remotes);
 	memcpy(f.eth_addr, eth_addr, ETH_ALEN);
 
-	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
 }
 
 /* Hash Ethernet address */
@@ -533,7 +529,8 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
 
 /* Add/update destinations for multicast */
 static int vxlan_fdb_append(struct vxlan_fdb *f,
-			    union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
+			    union vxlan_addr *ip, __be16 port, __u32 vni,
+			    __u32 ifindex, struct vxlan_rdst **rdp)
 {
 	struct vxlan_rdst *rd;
 
@@ -551,6 +548,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
 
 	list_add_tail_rcu(&rd->list, &f->remotes);
 
+	*rdp = rd;
 	return 1;
 }
 
@@ -690,6 +688,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 			    __be16 port, __u32 vni, __u32 ifindex,
 			    __u8 ndm_flags)
 {
+	struct vxlan_rdst *rd = NULL;
 	struct vxlan_fdb *f;
 	int notify = 0;
 
@@ -726,7 +725,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 		if ((flags & NLM_F_APPEND) &&
 		    (is_multicast_ether_addr(f->eth_addr) ||
 		     is_zero_ether_addr(f->eth_addr))) {
-			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
+			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
+						  &rd);
 
 			if (rc < 0)
 				return rc;
@@ -756,15 +756,18 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 		INIT_LIST_HEAD(&f->remotes);
 		memcpy(f->eth_addr, mac, ETH_ALEN);
 
-		vxlan_fdb_append(f, ip, port, vni, ifindex);
+		vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
 
 		++vxlan->addrcnt;
 		hlist_add_head_rcu(&f->hlist,
 				   vxlan_fdb_head(vxlan, mac));
 	}
 
-	if (notify)
-		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+	if (notify) {
+		if (rd == NULL)
+			rd = first_remote_rtnl(f);
+		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
+	}
 
 	return 0;
 }
@@ -785,7 +788,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
 		    "delete %pM\n", f->eth_addr);
 
 	--vxlan->addrcnt;
-	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
+	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
 
 	hlist_del_rcu(&f->hlist);
 	call_rcu(&f->rcu, vxlan_fdb_free);
@@ -919,6 +922,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 	 */
 	if (rd && !list_is_singular(&f->remotes)) {
 		list_del_rcu(&rd->list);
+		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
 		kfree_rcu(rd, rcu);
 		goto out;
 	}
@@ -993,7 +997,7 @@ static bool vxlan_snoop(struct net_device *dev,
 
 		rdst->remote_ip = *src_ip;
 		f->updated = jiffies;
-		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
 	} else {
 		/* learned new entry */
 		spin_lock(&vxlan->hash_lock);
@@ -1755,8 +1759,8 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
 	if (err)
 		return err;
 
-	return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df,
-			     false);
+	return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
+			     tos, ttl, df, false);
 }
 EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
 
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 84734a805092..83c39e2858bf 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -1521,11 +1521,7 @@ static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
 	cosa_putstatus(cosa, 0);
 	cosa_getdata8(cosa);
 	cosa_putstatus(cosa, SR_RST);
-#ifdef MODULE
 	msleep(500);
-#else
-	udelay(5*100000);
-#endif
 	/* Disable all IRQs from the card */
 	cosa_putstatus(cosa, 0);
 
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index a0398fe3eb28..be3eb2a8d602 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -86,7 +86,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
 	int irq;
 	int ret = 0;
 	struct ath_hw *ah;
-	struct ath_common *common;
 	char hw_name[64];
 
 	if (!dev_get_platdata(&pdev->dev)) {
@@ -146,9 +145,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
 	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
 		   hw_name, (unsigned long)mem, irq);
 
-	common = ath9k_hw_common(sc->sc_ah);
-	/* Will be cleared in ath9k_start() */
-	set_bit(ATH_OP_INVALID, &common->op_flags);
 	return 0;
 
  err_irq:
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 6d47783f2e5b..ba502a2d199b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -155,6 +155,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
 				     ATH9K_ANI_RSSI_THR_LOW,
 				     ATH9K_ANI_RSSI_THR_HIGH);
 
+	if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL)
+		immunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
+
 	if (!scan)
 		aniState->ofdmNoiseImmunityLevel = immunityLevel;
 
@@ -235,6 +238,9 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
 		 BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW,
 		 ATH9K_ANI_RSSI_THR_HIGH);
 
+	if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_CCK_DEF_LEVEL)
+		immunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+
 	if (ah->opmode == NL80211_IFTYPE_STATION &&
 	    BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW &&
 	    immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 44d74495c4de..3ba03dde4215 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -251,7 +251,6 @@ struct ath_atx_tid {
 
 	s8 bar_index;
 	bool sched;
-	bool paused;
 	bool active;
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index d76e6e0120d2..ffca918ff16a 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -72,7 +72,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
 		ath_txq_lock(sc, txq);
 		if (tid->active) {
 			len += scnprintf(buf + len, size - len,
-					 "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+					 "%3d%11d%10d%10d%10d%10d%9d%6d\n",
 					 tid->tidno,
 					 tid->seq_start,
 					 tid->seq_next,
@@ -80,8 +80,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
 					 tid->baw_head,
 					 tid->baw_tail,
 					 tid->bar_index,
-					 tid->sched,
-					 tid->paused);
+					 tid->sched);
 		}
 		ath_txq_unlock(sc, txq);
 	}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f46cd0250e48..5627917c5ff7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 
 	if ((vif->type == NL80211_IFTYPE_AP ||
 	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
-	    bss_conf->enable_beacon)
+	    bss_conf->enable_beacon) {
 		priv->reconfig_beacon = true;
+		priv->rearm_ani = true;
+	}
 
 	if (bss_conf->assoc) {
 		priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 
 	ath9k_htc_ps_wakeup(priv);
 
+	ath9k_htc_stop_ani(priv);
 	del_timer_sync(&priv->tx.cleanup_timer);
 	ath9k_htc_tx_drain(priv);
 
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index cbbb02a6b13b..36ae6490e554 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -783,6 +783,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
 	common = ath9k_hw_common(ah);
 	ath9k_set_hw_capab(sc, hw);
 
+	/* Will be cleared in ath9k_start() */
+	set_bit(ATH_OP_INVALID, &common->op_flags);
+
 	/* Initialize regulatory */
 	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
 			      ath9k_reg_notifier);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 25304adece57..914dbc6b1720 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -784,7 +784,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct ath_softc *sc;
 	struct ieee80211_hw *hw;
-	struct ath_common *common;
 	u8 csz;
 	u32 val;
 	int ret = 0;
@@ -877,10 +876,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
 		   hw_name, (unsigned long)sc->mem, pdev->irq);
 
-	/* Will be cleared in ath9k_start() */
-	common = ath9k_hw_common(sc->sc_ah);
-	set_bit(ATH_OP_INVALID, &common->op_flags);
-
 	return 0;
 
 err_init:
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6c9accdb52e4..19df969ec909 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -975,6 +975,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 	u64 tsf = 0;
 	unsigned long flags;
 	dma_addr_t new_buf_addr;
+	unsigned int budget = 512;
 
 	if (edma)
 		dma_type = DMA_BIDIRECTIONAL;
@@ -1113,15 +1114,17 @@ requeue_drop_frag:
 		}
 requeue:
 		list_add_tail(&bf->list, &sc->rx.rxbuf);
-		if (flush)
-			continue;
 
 		if (edma) {
 			ath_rx_edma_buf_link(sc, qtype);
 		} else {
 			ath_rx_buf_relink(sc, bf);
-			ath9k_hw_rxena(ah);
+			if (!flush)
+				ath9k_hw_rxena(ah);
 		}
+
+		if (!budget--)
+			break;
 	} while (1);
 
 	if (!(ah->imask & ATH9K_INT_RXEOL)) {
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 87cbec47fb48..66acb2cbd9df 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -107,9 +107,6 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 {
 	struct ath_atx_ac *ac = tid->ac;
 
-	if (tid->paused)
-		return;
-
 	if (tid->sched)
 		return;
 
@@ -1407,7 +1404,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
 	ath_tx_tid_change_state(sc, txtid);
 
 	txtid->active = true;
-	txtid->paused = true;
 	*ssn = txtid->seq_start = txtid->seq_next;
 	txtid->bar_index = -1;
 
@@ -1427,7 +1423,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 
 	ath_txq_lock(sc, txq);
 	txtid->active = false;
-	txtid->paused = false;
 	ath_tx_flush_tid(sc, txtid);
 	ath_tx_tid_change_state(sc, txtid);
 	ath_txq_unlock_complete(sc, txq);
@@ -1487,7 +1482,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
 		ath_txq_lock(sc, txq);
 		ac->clear_ps_filter = true;
 
-		if (!tid->paused && ath_tid_has_buffered(tid)) {
+		if (ath_tid_has_buffered(tid)) {
 			ath_tx_queue_tid(txq, tid);
 			ath_txq_schedule(sc, txq);
 		}
@@ -1510,7 +1505,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
 	ath_txq_lock(sc, txq);
 
 	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
-	tid->paused = false;
 
 	if (ath_tid_has_buffered(tid)) {
 		ath_tx_queue_tid(txq, tid);
@@ -1544,8 +1538,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
 			continue;
 
 		tid = ATH_AN_2_TID(an, i);
-		if (tid->paused)
-			continue;
 
 		ath_txq_lock(sc, tid->ac->txq);
 		while (nframes > 0) {
@@ -1844,9 +1836,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 		list_del(&tid->list);
 		tid->sched = false;
 
-		if (tid->paused)
-			continue;
-
 		if (ath_tx_sched_aggr(sc, txq, tid, &stop))
 			sent = true;
 
@@ -2698,7 +2687,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
 		tid->baw_size = WME_MAX_BA;
 		tid->baw_head = tid->baw_tail = 0;
 		tid->sched = false;
-		tid->paused = false;
 		tid->active = false;
 		__skb_queue_head_init(&tid->buf_q);
 		__skb_queue_head_init(&tid->retry_q);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index df130ef53d1c..c7c9f15c0fe0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -303,10 +303,10 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
 
 	ci = core->chip;
 
-	/* if core is already in reset, just return */
+	/* if core is already in reset, skip reset */
 	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
 	if ((regdata & BCMA_RESET_CTL_RESET) != 0)
-		return;
+		goto in_reset_configure;
 
 	/* configure reset */
 	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
@@ -322,6 +322,7 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
 	SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
 		 BCMA_RESET_CTL_RESET, 300);
 
+in_reset_configure:
 	/* in-reset configure */
 	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
 			 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index afb3d15e38ff..be1985296bdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
 	if (!err) {
 		/* only set 2G bandwidth using bw_cap command */
 		band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
-		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
 		err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
 					       sizeof(band_bwcap));
 	} else {
diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/cw1200/debug.c
index e323b4d54338..34f97c31eecf 100644
--- a/drivers/net/wireless/cw1200/debug.c
+++ b/drivers/net/wireless/cw1200/debug.c
@@ -41,6 +41,8 @@ static const char * const cw1200_debug_link_id[] = {
41 "REQ", 41 "REQ",
42 "SOFT", 42 "SOFT",
43 "HARD", 43 "HARD",
44 "RESET",
45 "RESET_REMAP",
44}; 46};
45 47
46static const char *cw1200_debug_mode(int mode) 48static const char *cw1200_debug_mode(int mode)
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 003a546571d4..4c2d4ef28b22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,8 +67,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX	8
-#define IWL3160_UCODE_API_MAX	8
+#define IWL7260_UCODE_API_MAX	9
+#define IWL3160_UCODE_API_MAX	9
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK	8
@@ -244,3 +244,4 @@ const struct iwl_cfg iwl7265_n_cfg = {
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
+MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 685f7e8e6943..0489314425cb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -190,7 +190,7 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
 		cpu_to_le32(0xcc00aaaa),
 		cpu_to_le32(0x0000aaaa),
 		cpu_to_le32(0xc0004000),
-		cpu_to_le32(0x00000000),
+		cpu_to_le32(0x00004000),
 		cpu_to_le32(0xf0005000),
 		cpu_to_le32(0xf0005000),
 	},
@@ -213,16 +213,16 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
 	/* Tx Tx disabled */
 		cpu_to_le32(0xaaaaaaaa),
 		cpu_to_le32(0xaaaaaaaa),
-		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xeeaaaaaa),
 		cpu_to_le32(0xaaaaaaaa),
 		cpu_to_le32(0xcc00ff28),
 		cpu_to_le32(0x0000aaaa),
 		cpu_to_le32(0xcc00aaaa),
 		cpu_to_le32(0x0000aaaa),
-		cpu_to_le32(0xC0004000),
-		cpu_to_le32(0xC0004000),
-		cpu_to_le32(0xF0005000),
-		cpu_to_le32(0xF0005000),
+		cpu_to_le32(0xc0004000),
+		cpu_to_le32(0xc0004000),
+		cpu_to_le32(0xf0005000),
+		cpu_to_le32(0xf0005000),
 	},
 };
 
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
 	if (IWL_MVM_BT_COEX_CORUNNING) {
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
						    BT_VALID_CORUN_LUT_40);
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
 	}
 
 	if (IWL_MVM_BT_COEX_MPLUT) {
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
 	}
 
 	if (mvm->cfg->bt_shared_single_ant)
@@ -1262,6 +1262,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 ant_isolation = le32_to_cpup((void *)pkt->data);
 	u8 __maybe_unused lower_bound, upper_bound;
+	int ret;
 	u8 lut;
 
 	struct iwl_bt_coex_cmd *bt_cmd;
@@ -1318,5 +1319,8 @@
 	memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
 	       sizeof(bt_cmd->bt4_corun_lut40));
 
-	return 0;
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+	kfree(bt_cmd);
+	return ret;
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 9426905de6b2..d73a89ecd78a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -183,9 +183,9 @@ enum iwl_scan_type {
  *	this number of packets were received (typically 1)
  * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
- *	bits 0-19: beacon interal in usecs (suspend before executing)
+ *	bits 0-19: beacon interval in TUs (suspend before executing)
  *	bits 20-23: reserved
  *	bits 24-31: number of beacons (suspend between channels)
  * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
  * @quiet_plcp_th: quiet channel num of packets threshold
  * @good_CRC_th: passive to active promotion threshold
  * @rx_chain: RXON rx chain.
- * @max_out_time: max uSec to be out of assoceated channel
- * @suspend_time: pause scan this long when returning to service channel
+ * @max_out_time: max TUs to be out of associated channel
+ * @suspend_time: pause scan this many TUs when returning to service channel
  * @flags: RXON flags
  * @filter_flags: RXONfilter
  * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 4dd9ff43b8b6..b41dc84e9431 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
 	if (ret)
 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
 	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
 		return;
 
-	ieee80211_iterate_active_interfaces(
+	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_mc_iface_iterator, &iter_data);
 }
@@ -1332,6 +1332,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 		 */
 		iwl_mvm_remove_time_event(mvm, mvmvif,
 					  &mvmvif->time_event_data);
+		iwl_mvm_sf_update(mvm, vif, false);
 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
 	} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
 			      BSS_CHANGED_QOS)) {
@@ -1806,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
 	mutex_lock(&mvm->mutex);
 
+	if (!iwl_mvm_is_idle(mvm)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	switch (mvm->scan_status) {
 	case IWL_MVM_SCAN_OS:
 		IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d564233a65da..f1ec0986c3c9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
 	return mvmvif->low_latency;
 }
 
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 568abd61b14f..e1c838899363 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -59,7 +59,7 @@
 /* max allowed rate miss before sync LQ cmd */
 #define IWL_MISSED_RATE_MAX		15
 #define RS_STAY_IN_COLUMN_TIMEOUT	(5*HZ)
-
+#define RS_IDLE_TIMEOUT			(5*HZ)
 
 static u8 rs_ht_to_legacy[] = {
 	[IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
@@ -142,7 +142,7 @@ enum rs_column_mode {
 	RS_MIMO2,
 };
 
-#define MAX_NEXT_COLUMNS 5
+#define MAX_NEXT_COLUMNS 7
 #define MAX_COLUMN_CHECKS 3
 
 typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
@@ -212,8 +212,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
 			RS_COLUMN_LEGACY_ANT_B,
 			RS_COLUMN_SISO_ANT_A,
 			RS_COLUMN_SISO_ANT_B,
-			RS_COLUMN_MIMO2,
-			RS_COLUMN_MIMO2_SGI,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
 		},
 	},
 	[RS_COLUMN_LEGACY_ANT_B] = {
@@ -223,8 +225,10 @@
 			RS_COLUMN_LEGACY_ANT_A,
 			RS_COLUMN_SISO_ANT_A,
 			RS_COLUMN_SISO_ANT_B,
-			RS_COLUMN_MIMO2,
-			RS_COLUMN_MIMO2_SGI,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
 		},
 	},
 	[RS_COLUMN_SISO_ANT_A] = {
@@ -235,7 +239,9 @@
 			RS_COLUMN_MIMO2,
 			RS_COLUMN_SISO_ANT_A_SGI,
 			RS_COLUMN_SISO_ANT_B_SGI,
-			RS_COLUMN_MIMO2_SGI,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
+			RS_COLUMN_INVALID,
 		},
 		.checks = {
 			rs_siso_allow,
@@ -249,7 +255,9 @@
 			RS_COLUMN_MIMO2,
 			RS_COLUMN_SISO_ANT_B_SGI,
 			RS_COLUMN_SISO_ANT_A_SGI,
-			RS_COLUMN_MIMO2_SGI,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
+			RS_COLUMN_INVALID,
 		},
 		.checks = {
 			rs_siso_allow,
@@ -265,6 +273,8 @@
 			RS_COLUMN_SISO_ANT_A,
 			RS_COLUMN_SISO_ANT_B,
 			RS_COLUMN_MIMO2,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
 		},
 		.checks = {
 			rs_siso_allow,
@@ -281,6 +291,8 @@
 			RS_COLUMN_SISO_ANT_B,
 			RS_COLUMN_SISO_ANT_A,
 			RS_COLUMN_MIMO2,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
 		},
 		.checks = {
 			rs_siso_allow,
@@ -296,6 +308,8 @@
 			RS_COLUMN_SISO_ANT_A_SGI,
 			RS_COLUMN_SISO_ANT_B_SGI,
 			RS_COLUMN_MIMO2_SGI,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
 		},
 		.checks = {
 			rs_mimo_allow,
@@ -311,6 +325,8 @@
 			RS_COLUMN_SISO_ANT_A,
 			RS_COLUMN_SISO_ANT_B,
 			RS_COLUMN_MIMO2,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
 		},
 		.checks = {
 			rs_mimo_allow,
@@ -503,10 +519,12 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
 	window->average_tpt = IWL_INVALID_VALUE;
 }
 
-static void rs_rate_scale_clear_tbl_windows(struct iwl_scale_tbl_info *tbl)
+static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
+					    struct iwl_scale_tbl_info *tbl)
 {
 	int i;
 
+	IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
 	for (i = 0; i < IWL_RATE_COUNT; i++)
 		rs_rate_scale_clear_window(&tbl->win[i]);
 }
@@ -992,6 +1010,13 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 		return;
 	}
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+	/* Disable last tx check if we are debugging with fixed rate */
+	if (lq_sta->dbg_fixed_rate) {
+		IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
+		return;
+	}
+#endif
 	if (!ieee80211_is_data(hdr->frame_control) ||
 	    info->flags & IEEE80211_TX_CTL_NO_ACK)
 		return;
@@ -1034,6 +1059,18 @@
 			mac_index++;
 	}
 
+	if (time_after(jiffies,
+		       (unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) {
+		int tid;
+		IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
+		for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+			ieee80211_stop_tx_ba_session(sta, tid);
+
+		iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
+		return;
+	}
+	lq_sta->last_tx = jiffies;
+
 	/* Here we actually compare this rate to the latest LQ command */
 	if ((mac_index < 0) ||
 	    (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
@@ -1186,9 +1223,26 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
 	lq_sta->visited_columns = 0;
 }
 
+static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta,
+				   const struct rs_tx_column *column)
+{
+	switch (column->mode) {
+	case RS_LEGACY:
+		return lq_sta->max_legacy_rate_idx;
+	case RS_SISO:
+		return lq_sta->max_siso_rate_idx;
+	case RS_MIMO2:
+		return lq_sta->max_mimo2_rate_idx;
+	default:
+		WARN_ON_ONCE(1);
+	}
+
+	return lq_sta->max_legacy_rate_idx;
+}
+
 static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
 					    const struct rs_tx_column *column,
 					    u32 bw)
 {
 	/* Used to choose among HT tables */
 	const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
@@ -1438,7 +1492,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
 
 			IWL_DEBUG_RATE(mvm,
 				       "LQ: stay in table clear win\n");
-			rs_rate_scale_clear_tbl_windows(tbl);
+			rs_rate_scale_clear_tbl_windows(mvm, tbl);
 		}
 	}
 
@@ -1446,8 +1500,7 @@
 	 * bitmaps and stats in active table (this will become the new
 	 * "search" table). */
 	if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
-			IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
-			rs_rate_scale_clear_tbl_windows(tbl);
+			rs_rate_scale_clear_tbl_windows(mvm, tbl);
 		}
 	}
 }
@@ -1485,14 +1538,14 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
 					 struct ieee80211_sta *sta,
 					 struct iwl_scale_tbl_info *tbl)
 {
-	int i, j, n;
+	int i, j, max_rate;
 	enum rs_column next_col_id;
 	const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
 	const struct rs_tx_column *next_col;
 	allow_column_func_t allow_func;
 	u8 valid_ants = mvm->fw->valid_tx_ant;
 	const u16 *expected_tpt_tbl;
-	s32 tpt, max_expected_tpt;
+	u16 tpt, max_expected_tpt;
 
 	for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
 		next_col_id = curr_col->next_columns[i];
@@ -1535,11 +1588,11 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
 		if (WARN_ON_ONCE(!expected_tpt_tbl))
 			continue;
 
-		max_expected_tpt = 0;
-		for (n = 0; n < IWL_RATE_COUNT; n++)
-			if (expected_tpt_tbl[n] > max_expected_tpt)
-				max_expected_tpt = expected_tpt_tbl[n];
+		max_rate = rs_get_max_allowed_rate(lq_sta, next_col);
+		if (WARN_ON_ONCE(max_rate == IWL_RATE_INVALID))
+			continue;
 
+		max_expected_tpt = expected_tpt_tbl[max_rate];
 		if (tpt >= max_expected_tpt) {
 			IWL_DEBUG_RATE(mvm,
 				       "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
@@ -1547,14 +1600,15 @@
 			continue;
 		}
 
+		IWL_DEBUG_RATE(mvm,
+			       "Found potential column %d. Max expected %d current %d\n",
+			       next_col_id, max_expected_tpt, tpt);
 		break;
 	}
 
 	if (i == MAX_NEXT_COLUMNS)
 		return RS_COLUMN_INVALID;
 
-	IWL_DEBUG_RATE(mvm, "Found potential column %d\n", next_col_id);
-
 	return next_col_id;
 }
 
@@ -1640,85 +1694,76 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
 {
 	enum rs_action action = RS_ACTION_STAY;
 
-	/* Too many failures, decrease rate */
 	if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) {
 		IWL_DEBUG_RATE(mvm,
-			       "decrease rate because of low SR\n");
-		action = RS_ACTION_DOWNSCALE;
-	/* No throughput measured yet for adjacent rates; try increase. */
-	} else if ((low_tpt == IWL_INVALID_VALUE) &&
-		   (high_tpt == IWL_INVALID_VALUE)) {
-		if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) {
-			IWL_DEBUG_RATE(mvm,
-				       "Good SR and no high rate measurement. "
-				       "Increase rate\n");
-			action = RS_ACTION_UPSCALE;
-		} else if (low != IWL_RATE_INVALID) {
-			IWL_DEBUG_RATE(mvm,
-				       "Remain in current rate\n");
-			action = RS_ACTION_STAY;
-		}
+			       "Decrease rate because of low SR\n");
+		return RS_ACTION_DOWNSCALE;
 	}
 
-	/* Both adjacent throughputs are measured, but neither one has better
-	 * throughput; we're using the best rate, don't change it!
-	 */
-	else if ((low_tpt != IWL_INVALID_VALUE) &&
-		 (high_tpt != IWL_INVALID_VALUE) &&
-		 (low_tpt < current_tpt) &&
-		 (high_tpt < current_tpt)) {
+	if ((low_tpt == IWL_INVALID_VALUE) &&
+	    (high_tpt == IWL_INVALID_VALUE) &&
+	    (high != IWL_RATE_INVALID)) {
 		IWL_DEBUG_RATE(mvm,
-			       "Both high and low are worse. "
-			       "Maintain rate\n");
-		action = RS_ACTION_STAY;
+			       "No data about high/low rates. Increase rate\n");
+		return RS_ACTION_UPSCALE;
 	}
 
-	/* At least one adjacent rate's throughput is measured,
-	 * and may have better performance.
-	 */
-	else {
-		/* Higher adjacent rate's throughput is measured */
-		if (high_tpt != IWL_INVALID_VALUE) {
-			/* Higher rate has better throughput */
-			if (high_tpt > current_tpt &&
-			    sr >= IWL_RATE_INCREASE_TH) {
-				IWL_DEBUG_RATE(mvm,
-					       "Higher rate is better and good "
-					       "SR. Increate rate\n");
-				action = RS_ACTION_UPSCALE;
-			} else {
-				IWL_DEBUG_RATE(mvm,
-					       "Higher rate isn't better OR "
-					       "no good SR. Maintain rate\n");
-				action = RS_ACTION_STAY;
-			}
+	if ((high_tpt == IWL_INVALID_VALUE) &&
+	    (high != IWL_RATE_INVALID) &&
+	    (low_tpt != IWL_INVALID_VALUE) &&
+	    (low_tpt < current_tpt)) {
+		IWL_DEBUG_RATE(mvm,
+			       "No data about high rate and low rate is worse. Increase rate\n");
+		return RS_ACTION_UPSCALE;
+	}
 
-		/* Lower adjacent rate's throughput is measured */
-		} else if (low_tpt != IWL_INVALID_VALUE) {
-			/* Lower rate has better throughput */
-			if (low_tpt > current_tpt) {
-				IWL_DEBUG_RATE(mvm,
-					       "Lower rate is better. "
-					       "Decrease rate\n");
-				action = RS_ACTION_DOWNSCALE;
-			} else if (sr >= IWL_RATE_INCREASE_TH) {
-				IWL_DEBUG_RATE(mvm,
-					       "Lower rate isn't better and "
-					       "good SR. Increase rate\n");
-				action = RS_ACTION_UPSCALE;
-			}
-		}
+	if ((high_tpt != IWL_INVALID_VALUE) &&
+	    (high_tpt > current_tpt)) {
+		IWL_DEBUG_RATE(mvm,
+			       "Higher rate is better. Increase rate\n");
+		return RS_ACTION_UPSCALE;
 	}
 
-	/* Sanity check; asked for decrease, but success rate or throughput
-	 * has been good at old rate. Don't change it.
-	 */
-	if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID) &&
-	    ((sr > IWL_RATE_HIGH_TH) ||
-	     (current_tpt > (100 * tbl->expected_tpt[low])))) {
+	if ((low_tpt != IWL_INVALID_VALUE) &&
+	    (high_tpt != IWL_INVALID_VALUE) &&
+	    (low_tpt < current_tpt) &&
+	    (high_tpt < current_tpt)) {
+		IWL_DEBUG_RATE(mvm,
+			       "Both high and low are worse. Maintain rate\n");
+		return RS_ACTION_STAY;
+	}
+
+	if ((low_tpt != IWL_INVALID_VALUE) &&
+	    (low_tpt > current_tpt)) {
+		IWL_DEBUG_RATE(mvm,
+			       "Lower rate is better\n");
+		action = RS_ACTION_DOWNSCALE;
+		goto out;
+	}
+
+	if ((low_tpt == IWL_INVALID_VALUE) &&
+	    (low != IWL_RATE_INVALID)) {
 		IWL_DEBUG_RATE(mvm,
-			       "Sanity check failed. Maintain rate\n");
-		action = RS_ACTION_STAY;
+			       "No data about lower rate\n");
+		action = RS_ACTION_DOWNSCALE;
+		goto out;
+	}
+
+	IWL_DEBUG_RATE(mvm, "Maintain rate\n");
+
+out:
+	if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
+		if (sr >= RS_SR_NO_DECREASE) {
+			IWL_DEBUG_RATE(mvm,
+				       "SR is above NO DECREASE. Avoid downscale\n");
+			action = RS_ACTION_STAY;
+		} else if (current_tpt > (100 * tbl->expected_tpt[low])) {
+			IWL_DEBUG_RATE(mvm,
+				       "Current TPT is higher than max expected in low rate. Avoid downscale\n");
+			action = RS_ACTION_STAY;
+		} else {
+			IWL_DEBUG_RATE(mvm, "Decrease rate\n");
+		}
 	}
 
 	return action;
@@ -1792,6 +1837,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
1792 "Aggregation changed: prev %d current %d. Update expected TPT table\n", 1837 "Aggregation changed: prev %d current %d. Update expected TPT table\n",
1793 prev_agg, lq_sta->is_agg); 1838 prev_agg, lq_sta->is_agg);
1794 rs_set_expected_tpt_table(lq_sta, tbl); 1839 rs_set_expected_tpt_table(lq_sta, tbl);
1840 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1795 } 1841 }
1796 1842
1797 /* current tx rate */ 1843 /* current tx rate */
@@ -2021,7 +2067,7 @@ lq_update:
2021 if (lq_sta->search_better_tbl) { 2067 if (lq_sta->search_better_tbl) {
2022 /* Access the "search" table, clear its history. */ 2068 /* Access the "search" table, clear its history. */
2023 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 2069 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2024 rs_rate_scale_clear_tbl_windows(tbl); 2070 rs_rate_scale_clear_tbl_windows(mvm, tbl);
2025 2071
2026 /* Use new "search" start rate */ 2072 /* Use new "search" start rate */
2027 index = tbl->rate.index; 2073 index = tbl->rate.index;
@@ -2042,8 +2088,18 @@ lq_update:
2042 * stay with best antenna legacy modulation for a while 2088 * stay with best antenna legacy modulation for a while
2043 * before next round of mode comparisons. */ 2089 * before next round of mode comparisons. */
2044 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); 2090 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2045 if (is_legacy(&tbl1->rate) && !sta->ht_cap.ht_supported) { 2091 if (is_legacy(&tbl1->rate)) {
2046 IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n"); 2092 IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
2093
2094 if (tid != IWL_MAX_TID_COUNT) {
2095 tid_data = &sta_priv->tid_data[tid];
2096 if (tid_data->state != IWL_AGG_OFF) {
2097 IWL_DEBUG_RATE(mvm,
2098 "Stop aggregation on tid %d\n",
2099 tid);
2100 ieee80211_stop_tx_ba_session(sta, tid);
2101 }
2102 }
2047 rs_set_stay_in_table(mvm, 1, lq_sta); 2103 rs_set_stay_in_table(mvm, 1, lq_sta);
2048 } else { 2104 } else {
2049 /* If we're in an HT mode, and all 3 mode switch actions 2105 /* If we're in an HT mode, and all 3 mode switch actions
@@ -2342,9 +2398,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2342 lq_sta->lq.sta_id = sta_priv->sta_id; 2398 lq_sta->lq.sta_id = sta_priv->sta_id;
2343 2399
2344 for (j = 0; j < LQ_SIZE; j++) 2400 for (j = 0; j < LQ_SIZE; j++)
2345 rs_rate_scale_clear_tbl_windows(&lq_sta->lq_info[j]); 2401 rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
2346 2402
2347 lq_sta->flush_timer = 0; 2403 lq_sta->flush_timer = 0;
2404 lq_sta->last_tx = jiffies;
2348 2405
2349 IWL_DEBUG_RATE(mvm, 2406 IWL_DEBUG_RATE(mvm,
2350 "LQ: *** rate scale station global init for station %d ***\n", 2407 "LQ: *** rate scale station global init for station %d ***\n",
@@ -2388,11 +2445,22 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2388 lq_sta->is_vht = true; 2445 lq_sta->is_vht = true;
2389 } 2446 }
2390 2447
2391 IWL_DEBUG_RATE(mvm, 2448 lq_sta->max_legacy_rate_idx = find_last_bit(&lq_sta->active_legacy_rate,
2392 "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n", 2449 BITS_PER_LONG);
2450 lq_sta->max_siso_rate_idx = find_last_bit(&lq_sta->active_siso_rate,
2451 BITS_PER_LONG);
2452 lq_sta->max_mimo2_rate_idx = find_last_bit(&lq_sta->active_mimo2_rate,
2453 BITS_PER_LONG);
2454
2455 IWL_DEBUG_RATE(mvm, "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d\n",
2456 lq_sta->active_legacy_rate,
2393 lq_sta->active_siso_rate, 2457 lq_sta->active_siso_rate,
2394 lq_sta->active_mimo2_rate, 2458 lq_sta->active_mimo2_rate,
2395 lq_sta->is_vht); 2459 lq_sta->is_vht);
2460 IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
2461 lq_sta->max_legacy_rate_idx,
2462 lq_sta->max_siso_rate_idx,
2463 lq_sta->max_mimo2_rate_idx);
2396 2464
2397 /* These values will be overridden later */ 2465 /* These values will be overridden later */
2398 lq_sta->lq.single_stream_ant_msk = 2466 lq_sta->lq.single_stream_ant_msk =
@@ -2547,6 +2615,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
2547 if (is_siso(&rate)) { 2615 if (is_siso(&rate)) {
2548 num_rates = RS_SECONDARY_SISO_NUM_RATES; 2616 num_rates = RS_SECONDARY_SISO_NUM_RATES;
2549 num_retries = RS_SECONDARY_SISO_RETRIES; 2617 num_retries = RS_SECONDARY_SISO_RETRIES;
2618 lq_cmd->mimo_delim = index;
2550 } else if (is_legacy(&rate)) { 2619 } else if (is_legacy(&rate)) {
2551 num_rates = RS_SECONDARY_LEGACY_NUM_RATES; 2620 num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
2552 num_retries = RS_LEGACY_RETRIES_PER_RATE; 2621 num_retries = RS_LEGACY_RETRIES_PER_RATE;
@@ -2749,7 +2818,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2749 return -ENOMEM; 2818 return -ENOMEM;
2750 2819
2751 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); 2820 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2752 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", 2821 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n",
2753 lq_sta->total_failed, lq_sta->total_success, 2822 lq_sta->total_failed, lq_sta->total_success,
2754 lq_sta->active_legacy_rate); 2823 lq_sta->active_legacy_rate);
2755 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 2824 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 3332b396011e..0acfac96a56c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -156,6 +156,7 @@ enum {
 #define IWL_RATE_HIGH_TH		10880	/*  85% */
 #define IWL_RATE_INCREASE_TH		6400	/*  50% */
 #define RS_SR_FORCE_DECREASE		1920	/*  15% */
+#define RS_SR_NO_DECREASE		10880	/*  85% */
 
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000) /* 4 milliseconds */
 #define LINK_QUAL_AGG_TIME_LIMIT_MAX	(8000)
@@ -310,13 +311,20 @@ struct iwl_lq_sta {
 	u32 visited_columns;    /* Bitmask marking which Tx columns were
				 * explored during a search cycle
				 */
+	u64 last_tx;
 	bool is_vht;
 	enum ieee80211_band band;
 
 	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
-	u16 active_legacy_rate;
-	u16 active_siso_rate;
-	u16 active_mimo2_rate;
+	unsigned long active_legacy_rate;
+	unsigned long active_siso_rate;
+	unsigned long active_mimo2_rate;
+
+	/* Highest rate per Tx mode */
+	u8 max_legacy_rate_idx;
+	u8 max_siso_rate_idx;
+	u8 max_mimo2_rate_idx;
+
 	s8 max_rate_idx;     /* Max rate set by user */
 	u8 missed_rate_counter;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c91dc8498852..c28de54c75d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
277 IEEE80211_IFACE_ITER_NORMAL, 277 IEEE80211_IFACE_ITER_NORMAL,
278 iwl_mvm_scan_condition_iterator, 278 iwl_mvm_scan_condition_iterator,
279 &global_bound); 279 &global_bound);
280 /*
281 * Under low latency traffic passive scan is fragmented meaning
282 * that dwell on a particular channel will be fragmented. Each fragment
283 * dwell time is 20ms and fragments period is 105ms. Skipping to next
284 * channel will be delayed by the same period - 105ms. So suspend_time
285 * parameter describing both fragments and channels skipping periods is
286 * set to 105ms. This value is chosen so that overall passive scan
287 * duration will not be too long. Max_out_time in this case is set to
288 * 70ms, so for active scanning operating channel will be left for 70ms
289 * while for passive still for 20ms (fragment dwell).
290 */
291 if (global_bound) {
292 if (!iwl_mvm_low_latency(mvm)) {
293 params->suspend_time = ieee80211_tu_to_usec(100);
294 params->max_out_time = ieee80211_tu_to_usec(600);
295 } else {
296 params->suspend_time = ieee80211_tu_to_usec(105);
297 /* P2P doesn't support fragmented passive scan, so
298 * configure max_out_time to be at least longest dwell
299 * time for passive scan.
300 */
301 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
302 params->max_out_time = ieee80211_tu_to_usec(70);
303 params->passive_fragmented = true;
304 } else {
305 u32 passive_dwell;
306 280
307 /* 281 if (!global_bound)
308 * Use band G so that passive channel dwell time 282 goto not_bound;
309 * will be assigned with maximum value. 283
310 */ 284 params->suspend_time = 100;
311 band = IEEE80211_BAND_2GHZ; 285 params->max_out_time = 600;
312 passive_dwell = iwl_mvm_get_passive_dwell(band); 286
313 params->max_out_time = 287 if (iwl_mvm_low_latency(mvm)) {
314 ieee80211_tu_to_usec(passive_dwell); 288 params->suspend_time = 250;
315 } 289 params->max_out_time = 250;
316 }
317 } 290 }
318 291
292 not_bound:
293
319 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { 294 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
320 if (params->passive_fragmented) 295 params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
321 params->dwell[band].passive = 20;
322 else
323 params->dwell[band].passive =
324 iwl_mvm_get_passive_dwell(band);
325 params->dwell[band].active = iwl_mvm_get_active_dwell(band, 296 params->dwell[band].active = iwl_mvm_get_active_dwell(band,
326 n_ssids); 297 n_ssids);
327 } 298 }
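The rewrite above replaces the fragmented-passive-scan special cases with two fixed out-of-channel budgets. A minimal model of the resulting decision flow (struct and helper names are illustrative; values are taken from the new code, which no longer converts through ieee80211_tu_to_usec()):

struct scan_time_params {
	int suspend_time;
	int max_out_time;
};

/* No budget unless some vif is bound; a default budget otherwise,
 * tightened when low-latency traffic is active. */
static void calc_scan_times(struct scan_time_params *p,
			    int global_bound, int low_latency)
{
	if (!global_bound)
		return;

	p->suspend_time = 100;
	p->max_out_time = 600;

	if (low_latency) {
		p->suspend_time = 250;
		p->max_out_time = 250;
	}
}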
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
761 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; 732 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
762 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; 733 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
763 int head = 0; 734 int head = 0;
764 int tail = band_2ghz + band_5ghz; 735 int tail = band_2ghz + band_5ghz - 1;
765 u32 ssid_bitmap; 736 u32 ssid_bitmap;
766 int cmd_len; 737 int cmd_len;
767 int ret; 738 int ret;
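The one-character fix above is an off-by-one: with band_2ghz + band_5ghz = N channel entries, valid indices run 0 to N - 1, so seeding the tail cursor with N points one slot past the end of the list. In miniature:

/* Filling a table of N entries from both ends; illustrative only. */
static int chan_list[8];	/* N = 8, valid indices 0..7 */

static void fill_from_both_ends(void)
{
	int head = 0;
	int tail = 8 - 1;	/* the fix: N - 1, not N */

	chan_list[head++] = 1;	/* fills index 0 */
	chan_list[tail--] = 2;	/* fills index 7; with tail = N this
				 * write would land out of bounds */
}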
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 8401627c0030..88809b2d1654 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -274,7 +274,8 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
274 return -EINVAL; 274 return -EINVAL;
275 if (changed_vif->type != NL80211_IFTYPE_STATION) { 275 if (changed_vif->type != NL80211_IFTYPE_STATION) {
276 new_state = SF_UNINIT; 276 new_state = SF_UNINIT;
277 } else if (changed_vif->bss_conf.assoc) { 277 } else if (changed_vif->bss_conf.assoc &&
278 changed_vif->bss_conf.dtim_period) {
278 mvmvif = iwl_mvm_vif_from_mac80211(changed_vif); 279 mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
279 sta_id = mvmvif->ap_sta_id; 280 sta_id = mvmvif->ap_sta_id;
280 new_state = SF_FULL_ON; 281 new_state = SF_FULL_ON;
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index d619851745a1..2180902266ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
644 644
645 return result; 645 return result;
646 } 646 }
647
648 static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
649 {
650 bool *idle = _data;
651
652 if (!vif->bss_conf.idle)
653 *idle = false;
654 }
655
656 bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
657 {
658 bool idle = true;
659
660 ieee80211_iterate_active_interfaces_atomic(
661 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
662 iwl_mvm_idle_iter, &idle);
663
664 return idle;
665 }
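The new helper reports whether every active interface is idle by AND-ing the per-vif bss_conf.idle flags through the atomic iterator. A hypothetical caller (assumed for illustration, not part of this patch) would gate work on it like so:

/* Sketch: refuse to start some offloaded operation unless the whole
 * device is idle. Only iwl_mvm_is_idle() comes from the patch above. */
static int start_offload_op(struct iwl_mvm *mvm)
{
	if (!iwl_mvm_is_idle(mvm))
		return -EBUSY;

	/* ... proceed with the operation ... */
	return 0;
}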
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index edb015c99049..3d1d57f9f5bc 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -373,12 +373,14 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
373 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, 373 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
374 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, 374 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
375 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, 375 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
376 {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
376 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, 377 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
377 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, 378 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, 379 {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
379 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, 380 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
380 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
381 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, 382 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
383 {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
382 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, 384 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
383 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, 385 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
384 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, 386 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dcfd6d866d09..2365553f1ef7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1749 * PCI Tx retries from interfering with C3 CPU state */ 1749 * PCI Tx retries from interfering with C3 CPU state */
1750 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 1750 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
1751 1751
1752 trans->dev = &pdev->dev;
1753 trans_pcie->pci_dev = pdev;
1754 iwl_disable_interrupts(trans);
1755
1752 err = pci_enable_msi(pdev); 1756 err = pci_enable_msi(pdev);
1753 if (err) { 1757 if (err) {
1754 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); 1758 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1760 } 1764 }
1761 } 1765 }
1762 1766
1763 trans->dev = &pdev->dev;
1764 trans_pcie->pci_dev = pdev;
1765 trans->hw_rev = iwl_read32(trans, CSR_HW_REV); 1767 trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
1766 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 1768 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
1767 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 1769 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1787 goto out_pci_disable_msi; 1789 goto out_pci_disable_msi;
1788 } 1790 }
1789 1791
1790 trans_pcie->inta_mask = CSR_INI_SET_MASK;
1791
1792 if (iwl_pcie_alloc_ict(trans)) 1792 if (iwl_pcie_alloc_ict(trans))
1793 goto out_free_cmd_pool; 1793 goto out_free_cmd_pool;
1794 1794
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1800 goto out_free_ict; 1800 goto out_free_ict;
1801 } 1801 }
1802 1802
1803 trans_pcie->inta_mask = CSR_INI_SET_MASK;
1804
1803 return trans; 1805 return trans;
1804 1806
1805 out_free_ict: 1807 out_free_ict:
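The two moves above tighten probe ordering: the handler-visible fields are published and the device's interrupts are masked before pci_enable_msi() can let an interrupt through, and inta_mask is now written only after the ICT area exists. A condensed view of the resulting order (every statement appears in the hunks; the comments are inferred rationale, not taken from the patch):

/* Sketch of the post-patch probe order, not the full function: */
trans->dev = &pdev->dev;		/* state an early IRQ could touch */
trans_pcie->pci_dev = pdev;
iwl_disable_interrupts(trans);		/* mask the device first, so...   */
err = pci_enable_msi(pdev);		/* ...MSI cannot deliver a stray
					 * interrupt before we are ready  */
/* ... ICT allocation ... */
trans_pcie->inta_mask = CSR_INI_SET_MASK; /* only once ICT is in place    */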
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 77db0886c6e2..9c771b3e9918 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -292,6 +292,12 @@ process_start:
292 while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) 292 while ((skb = skb_dequeue(&adapter->usb_rx_data_q)))
293 mwifiex_handle_rx_packet(adapter, skb); 293 mwifiex_handle_rx_packet(adapter, skb);
294 294
295 /* Check for event */
296 if (adapter->event_received) {
297 adapter->event_received = false;
298 mwifiex_process_event(adapter);
299 }
300
295 /* Check for Cmd Resp */ 301 /* Check for Cmd Resp */
296 if (adapter->cmd_resp_received) { 302 if (adapter->cmd_resp_received) {
297 adapter->cmd_resp_received = false; 303 adapter->cmd_resp_received = false;
@@ -304,12 +310,6 @@ process_start:
304 } 310 }
305 } 311 }
306 312
307 /* Check for event */
308 if (adapter->event_received) {
309 adapter->event_received = false;
310 mwifiex_process_event(adapter);
311 }
312
313 /* Check if we need to confirm Sleep Request 313 /* Check if we need to confirm Sleep Request
314 received previously */ 314 received previously */
315 if (adapter->ps_state == PS_STATE_PRE_SLEEP) { 315 if (adapter->ps_state == PS_STATE_PRE_SLEEP) {
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 894270611f2c..536c14aa71f3 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -60,9 +60,10 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
60 int status; 60 int status;
61 61
62 /* Wait for completion */ 62 /* Wait for completion */
63 status = wait_event_interruptible(adapter->cmd_wait_q.wait, 63 status = wait_event_interruptible_timeout(adapter->cmd_wait_q.wait,
64 *(cmd_queued->condition)); 64 *(cmd_queued->condition),
65 if (status) { 65 (12 * HZ));
66 if (status <= 0) {
66 dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status); 67 dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
67 mwifiex_cancel_all_pending_cmd(adapter); 68 mwifiex_cancel_all_pending_cmd(adapter);
68 return status; 69 return status;
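The switch to wait_event_interruptible_timeout() (12 s here) bounds a wait that could previously hang forever on a lost completion, and the new `status <= 0` test leans on its return contract: positive means the condition fired, zero means timeout, negative means a signal arrived. A standalone model of that classification:

#include <stdio.h>

/* The three outcomes the driver now distinguishes; -512 stands in for
 * the kernel's -ERESTARTSYS. */
static const char *classify(long status)
{
	if (status > 0)
		return "condition met";
	if (status == 0)
		return "timed out";
	return "interrupted by signal";
}

int main(void)
{
	printf("%s / %s / %s\n", classify(3), classify(0), classify(-512));
	return 0;
}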
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 1a8d32138593..cf61d6e3eaa7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -88,7 +88,7 @@ static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
88 bool recontend_queue = false; 88 bool recontend_queue = false;
89 u32 q_len = 0; 89 u32 q_len = 0;
90 u8 q_num = INVALID_QUEUE; 90 u8 q_num = INVALID_QUEUE;
91 u8 ii, min = 0; 91 u8 ii = 0, min = 0;
92 92
93 if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) { 93 if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
94 if (!common->mgmt_q_block) 94 if (!common->mgmt_q_block)
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 73694295648f..1b28cda6ca88 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -841,16 +841,6 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
841 rsi_dbg(MGMT_TX_ZONE, 841 rsi_dbg(MGMT_TX_ZONE,
842 "%s: Sending scan req frame\n", __func__); 842 "%s: Sending scan req frame\n", __func__);
843 843
844 skb = dev_alloc_skb(FRAME_DESC_SZ);
845 if (!skb) {
846 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
847 __func__);
848 return -ENOMEM;
849 }
850
851 memset(skb->data, 0, FRAME_DESC_SZ);
852 mgmt_frame = (struct rsi_mac_frame *)skb->data;
853
854 if (common->band == IEEE80211_BAND_5GHZ) { 844 if (common->band == IEEE80211_BAND_5GHZ) {
855 if ((channel >= 36) && (channel <= 64)) 845 if ((channel >= 36) && (channel <= 64))
856 channel = ((channel - 32) / 4); 846 channel = ((channel - 32) / 4);
@@ -868,6 +858,16 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
868 } 858 }
869 } 859 }
870 860
861 skb = dev_alloc_skb(FRAME_DESC_SZ);
862 if (!skb) {
863 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
864 __func__);
865 return -ENOMEM;
866 }
867
868 memset(skb->data, 0, FRAME_DESC_SZ);
869 mgmt_frame = (struct rsi_mac_frame *)skb->data;
870
871 mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); 871 mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
872 mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST); 872 mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST);
873 mgmt_frame->desc_word[4] = cpu_to_le16(channel); 873 mgmt_frame->desc_word[4] = cpu_to_le16(channel);
@@ -966,6 +966,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
966 if (!selected_rates) { 966 if (!selected_rates) {
967 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n", 967 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n",
968 __func__); 968 __func__);
969 dev_kfree_skb(skb);
969 return -ENOMEM; 970 return -ENOMEM;
970 } 971 }
971 972
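Both rsi_91x_mgmt.c hunks enforce the same error-path discipline: allocate the skb only after the checks that can fail without it, and free it on every later failure path. The pattern in miniature (plain C, all names illustrative):

#include <stdlib.h>

static int build_frame(int channel)
{
	char *skb, *rates;

	if (channel < 0)	/* validate before allocating (first hunk) */
		return -1;

	skb = malloc(64);
	if (!skb)
		return -1;

	rates = malloc(32);
	if (!rates) {
		free(skb);	/* do not leak skb here (second hunk) */
		return -1;
	}

	free(rates);
	free(skb);
	return 0;
}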
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index ddeb5a709aa3..a87ee9b6585a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -621,20 +621,18 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
621 bss_conf->bssid); 621 bss_conf->bssid);
622 622
623 /* 623 /*
624 * Update the beacon. This is only required on USB devices. PCI
625 * devices fetch beacons periodically.
626 */
627 if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
628 rt2x00queue_update_beacon(rt2x00dev, vif);
629
630 /*
631 * Start/stop beaconing. 624 * Start/stop beaconing.
632 */ 625 */
633 if (changes & BSS_CHANGED_BEACON_ENABLED) { 626 if (changes & BSS_CHANGED_BEACON_ENABLED) {
634 if (!bss_conf->enable_beacon && intf->enable_beacon) { 627 if (!bss_conf->enable_beacon && intf->enable_beacon) {
635 rt2x00queue_clear_beacon(rt2x00dev, vif);
636 rt2x00dev->intf_beaconing--; 628 rt2x00dev->intf_beaconing--;
637 intf->enable_beacon = false; 629 intf->enable_beacon = false;
630 /*
631 * Clear beacon in the H/W for this vif. This is needed
632 * to disable beaconing on this particular interface
633 * and keep it running on other interfaces.
634 */
635 rt2x00queue_clear_beacon(rt2x00dev, vif);
638 636
639 if (rt2x00dev->intf_beaconing == 0) { 637 if (rt2x00dev->intf_beaconing == 0) {
640 /* 638 /*
@@ -645,11 +643,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
645 rt2x00queue_stop_queue(rt2x00dev->bcn); 643 rt2x00queue_stop_queue(rt2x00dev->bcn);
646 mutex_unlock(&intf->beacon_skb_mutex); 644 mutex_unlock(&intf->beacon_skb_mutex);
647 } 645 }
648
649
650 } else if (bss_conf->enable_beacon && !intf->enable_beacon) { 646 } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
651 rt2x00dev->intf_beaconing++; 647 rt2x00dev->intf_beaconing++;
652 intf->enable_beacon = true; 648 intf->enable_beacon = true;
649 /*
650 * Upload beacon to the H/W. This is only required on
651 * USB devices. PCI devices fetch beacons periodically.
652 */
653 if (rt2x00_is_usb(rt2x00dev))
654 rt2x00queue_update_beacon(rt2x00dev, vif);
653 655
654 if (rt2x00dev->intf_beaconing == 1) { 656 if (rt2x00dev->intf_beaconing == 1) {
655 /* 657 /*
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index 06ef47cd6203..5b4c225396f2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -293,7 +293,7 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
293 u8 *psaddr; 293 u8 *psaddr;
294 __le16 fc; 294 __le16 fc;
295 u16 type, ufc; 295 u16 type, ufc;
296 bool match_bssid, packet_toself, packet_beacon, addr; 296 bool match_bssid, packet_toself, packet_beacon = false, addr;
297 297
298 tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; 298 tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
299 299
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 68b5c7e92cfb..07cb06da6729 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1001,7 +1001,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
1001 err = _rtl92cu_init_mac(hw); 1001 err = _rtl92cu_init_mac(hw);
1002 if (err) { 1002 if (err) {
1003 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n"); 1003 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
1004 return err; 1004 goto exit;
1005 } 1005 }
1006 err = rtl92c_download_fw(hw); 1006 err = rtl92c_download_fw(hw);
1007 if (err) { 1007 if (err) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 36b48be8329c..2b3c78baa9f8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -49,6 +49,12 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
49 if (ieee80211_is_nullfunc(fc)) 49 if (ieee80211_is_nullfunc(fc))
50 return QSLT_HIGH; 50 return QSLT_HIGH;
51 51
52 /* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use
53 * queue V0 at priority 7; however, the RTL8192SE appears to have
54 * that queue at priority 6
55 */
56 if (skb->priority == 7)
57 return QSLT_VO;
52 return skb->priority; 58 return skb->priority;
53 } 59 }
54 60
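The added comment explains the mismatch: kernel commit 1bf4bbb4024dcdab put EAPOL frames on priority 7, while this chip's VO queue answers to priority 6, so priority-7 frames are steered explicitly. The mapping in isolation (QSLT_VO = 6 is assumed from context):

enum { QSLT_VO = 6 };	/* assumed value of the chip's VO queue selector */

static int map_priority_to_queue(int priority)
{
	if (priority == 7)	/* EAPOL since commit 1bf4bbb4024dcdab */
		return QSLT_VO;
	return priority;
}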
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index 398f3d2c0a6c..a76e98eb8372 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -68,6 +68,26 @@ struct wl18xx_event_mailbox {
68 68
69 /* bitmap of inactive stations (by HLID) */ 69 /* bitmap of inactive stations (by HLID) */
70 __le32 inactive_sta_bitmap; 70 __le32 inactive_sta_bitmap;
71
72 /* rx BA win size indicated by RX_BA_WIN_SIZE_CHANGE_EVENT_ID */
73 u8 rx_ba_role_id;
74 u8 rx_ba_link_id;
75 u8 rx_ba_win_size;
76 u8 padding;
77
78 /* smart config */
79 u8 sc_ssid_len;
80 u8 sc_pwd_len;
81 u8 sc_token_len;
82 u8 padding1;
83 u8 sc_ssid[32];
84 u8 sc_pwd[32];
85 u8 sc_token[32];
86
87 /* smart config sync channel */
88 u8 sc_sync_channel;
89 u8 sc_sync_band;
90 u8 padding2[2];
71 } __packed; 91 } __packed;
72 92
73 int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, 93 int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
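The mailbox struct is firmware ABI, hence the byte-sized fields with explicit padding (padding, padding1, padding2) keeping each group 32-bit aligned inside the __packed layout. A standalone check of that property for the smart-config block (sizes from the hunk; the struct name is hypothetical):

#include <stdint.h>

struct sc_block {
	uint8_t sc_ssid_len;
	uint8_t sc_pwd_len;
	uint8_t sc_token_len;
	uint8_t padding1;	/* keeps the arrays below 32-bit aligned */
	uint8_t sc_ssid[32];
	uint8_t sc_pwd[32];
	uint8_t sc_token[32];
} __attribute__((packed));

/* 4 length/padding bytes + 3 * 32 payload bytes = 100 bytes on the wire */
_Static_assert(sizeof(struct sc_block) == 100, "fixed wire layout");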
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 1f9a36031b06..16d10281798d 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -158,6 +158,11 @@ EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
158 158
159 void wlcore_event_dummy_packet(struct wl1271 *wl) 159 void wlcore_event_dummy_packet(struct wl1271 *wl)
160 { 160 {
161 if (wl->plt) {
162 wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring.");
163 return;
164 }
165
161 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); 166 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
162 wl1271_tx_dummy_packet(wl); 167 wl1271_tx_dummy_packet(wl);
163 } 168 }
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 630a3fcf65bc..0d4a285cbd7e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
226 grant_ref_t rx_ring_ref); 226 grant_ref_t rx_ring_ref);
227 227
228 /* Check for SKBs from frontend and schedule backend processing */ 228 /* Check for SKBs from frontend and schedule backend processing */
229 void xenvif_check_rx_xenvif(struct xenvif *vif); 229 void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
230 230
231 /* Prevent the device from generating any further traffic. */ 231 /* Prevent the device from generating any further traffic. */
232 void xenvif_carrier_off(struct xenvif *vif); 232 void xenvif_carrier_off(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ef05c5c49d41..20e9defa1060 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
75 work_done = xenvif_tx_action(vif, budget); 75 work_done = xenvif_tx_action(vif, budget);
76 76
77 if (work_done < budget) { 77 if (work_done < budget) {
78 int more_to_do = 0; 78 napi_complete(napi);
79 unsigned long flags; 79 xenvif_napi_schedule_or_enable_events(vif);
80
81 /* It is necessary to disable IRQ before calling
82 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
83 * lose event from the frontend.
84 *
85 * Consider:
86 * RING_HAS_UNCONSUMED_REQUESTS
87 * <frontend generates event to trigger napi_schedule>
88 * __napi_complete
89 *
90 * This handler is still in scheduled state so the
91 * event has no effect at all. After __napi_complete
92 * this handler is descheduled and cannot get
93 * scheduled again. We lose event in this case and the ring
94 * will be completely stalled.
95 */
96
97 local_irq_save(flags);
98
99 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
100 if (!more_to_do)
101 __napi_complete(napi);
102
103 local_irq_restore(flags);
104 } 80 }
105 81
106 return work_done; 82 return work_done;
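The open-coded race protection (IRQs off, RING_FINAL_CHECK_FOR_REQUESTS, __napi_complete) gives way to napi_complete() plus one shared helper. Judging from the call sites in these hunks, the helper re-checks the ring after completion and reschedules NAPI if requests slipped in, so a frontend event can no longer be lost; a sketch of that shape (the helper's body is outside the shown hunks, so this is inferred):

/* Inferred sketch of xenvif_napi_schedule_or_enable_events(), built only
 * from its callers above; not the verbatim body. */
void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
{
	int more_to_do;

	/* Re-arms the ring event index and reports whether requests
	 * arrived while this poll cycle was completing. */
	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}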
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
194 enable_irq(vif->tx_irq); 170 enable_irq(vif->tx_irq);
195 if (vif->tx_irq != vif->rx_irq) 171 if (vif->tx_irq != vif->rx_irq)
196 enable_irq(vif->rx_irq); 172 enable_irq(vif->rx_irq);
197 xenvif_check_rx_xenvif(vif); 173 xenvif_napi_schedule_or_enable_events(vif);
198 } 174 }
199 175
200 static void xenvif_down(struct xenvif *vif) 176 static void xenvif_down(struct xenvif *vif)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 76665405c5aa..7367208ee8cd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
104 104
105 /* Find the containing VIF's structure from a pointer in pending_tx_info array 105 /* Find the containing VIF's structure from a pointer in pending_tx_info array
106 */ 106 */
107 static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf) 107 static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
108 { 108 {
109 u16 pending_idx = ubuf->desc; 109 u16 pending_idx = ubuf->desc;
110 struct pending_tx_info *temp = 110 struct pending_tx_info *temp =
@@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
323 } 323 }
324 324
325 /* 325 /*
326 * Find the grant ref for a given frag in a chain of struct ubuf_info's
327 * skb: the skb itself
328 * i: the frag's number
329 * ubuf: a pointer to an element in the chain. It should not be NULL
330 *
331 * Returns a pointer to the element in the chain where the page was found. If
332 * not found, returns NULL.
333 * See the definition of callback_struct in common.h for more details about
334 * the chain.
335 */
336 static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
337 const int i,
338 const struct ubuf_info *ubuf)
339 {
340 struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
341
342 do {
343 u16 pending_idx = ubuf->desc;
344
345 if (skb_shinfo(skb)->frags[i].page.p ==
346 foreign_vif->mmap_pages[pending_idx])
347 break;
348 ubuf = (struct ubuf_info *) ubuf->ctx;
349 } while (ubuf);
350
351 return ubuf;
352 }
353
354 /*
326 * Prepare an SKB to be transmitted to the frontend. 355 * Prepare an SKB to be transmitted to the frontend.
327 * 356 *
328 * This function is responsible for allocating grant operations, meta 357 * This function is responsible for allocating grant operations, meta
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
346 int head = 1; 375 int head = 1;
347 int old_meta_prod; 376 int old_meta_prod;
348 int gso_type; 377 int gso_type;
349 struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; 378 const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
350 grant_ref_t foreign_grefs[MAX_SKB_FRAGS]; 379 const struct ubuf_info *const head_ubuf = ubuf;
351 struct xenvif *foreign_vif = NULL;
352 380
353 old_meta_prod = npo->meta_prod; 381 old_meta_prod = npo->meta_prod;
354 382
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
386 npo->copy_off = 0; 414 npo->copy_off = 0;
387 npo->copy_gref = req->gref; 415 npo->copy_gref = req->gref;
388 416
389 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
390 (ubuf->callback == &xenvif_zerocopy_callback)) {
391 int i = 0;
392 foreign_vif = ubuf_to_vif(ubuf);
393
394 do {
395 u16 pending_idx = ubuf->desc;
396 foreign_grefs[i++] =
397 foreign_vif->pending_tx_info[pending_idx].req.gref;
398 ubuf = (struct ubuf_info *) ubuf->ctx;
399 } while (ubuf);
400 }
401
402 data = skb->data; 417 data = skb->data;
403 while (data < skb_tail_pointer(skb)) { 418 while (data < skb_tail_pointer(skb)) {
404 unsigned int offset = offset_in_page(data); 419 unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
415 } 430 }
416 431
417 for (i = 0; i < nr_frags; i++) { 432 for (i = 0; i < nr_frags; i++) {
433 /* This variable also signals whether foreign_gref has a real
434 * value or not.
435 */
436 struct xenvif *foreign_vif = NULL;
437 grant_ref_t foreign_gref;
438
439 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
440 (ubuf->callback == &xenvif_zerocopy_callback)) {
441 const struct ubuf_info *const startpoint = ubuf;
442
443 /* Ideally ubuf points to the chain element which
444 * belongs to this frag. Or if frags were removed from
445 * the beginning, then shortly before it.
446 */
447 ubuf = xenvif_find_gref(skb, i, ubuf);
448
449 /* Try again from the beginning of the list, if we
450 * haven't tried from there. This only makes sense in
451 * the unlikely event of reordering the original frags.
452 * For injected local pages it's an unnecessary second
453 * run.
454 */
455 if (unlikely(!ubuf) && startpoint != head_ubuf)
456 ubuf = xenvif_find_gref(skb, i, head_ubuf);
457
458 if (likely(ubuf)) {
459 u16 pending_idx = ubuf->desc;
460
461 foreign_vif = ubuf_to_vif(ubuf);
462 foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
463 /* Just a safety measure. If this was the last
464 * element on the list, the for loop will
465 * iterate again if a local page were added to
466 * the end. Using head_ubuf here prevents the
467 * second search on the chain. Or the original
468 * frags changed order, but that's less likely.
469 * In any way, ubuf shouldn't be NULL.
470 */
471 ubuf = ubuf->ctx ?
472 (struct ubuf_info *) ubuf->ctx :
473 head_ubuf;
474 } else
475 /* This frag was a local page, added to the
476 * array after the skb left netback.
477 */
478 ubuf = head_ubuf;
479 }
418 xenvif_gop_frag_copy(vif, skb, npo, 480 xenvif_gop_frag_copy(vif, skb, npo,
419 skb_frag_page(&skb_shinfo(skb)->frags[i]), 481 skb_frag_page(&skb_shinfo(skb)->frags[i]),
420 skb_frag_size(&skb_shinfo(skb)->frags[i]), 482 skb_frag_size(&skb_shinfo(skb)->frags[i]),
421 skb_shinfo(skb)->frags[i].page_offset, 483 skb_shinfo(skb)->frags[i].page_offset,
422 &head, 484 &head,
423 foreign_vif, 485 foreign_vif,
424 foreign_grefs[i]); 486 foreign_vif ? foreign_gref : UINT_MAX);
425 } 487 }
426 488
427 return npo->meta_prod - old_meta_prod; 489 return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
654 notify_remote_via_irq(vif->rx_irq); 716 notify_remote_via_irq(vif->rx_irq);
655 } 717 }
656 718
657 void xenvif_check_rx_xenvif(struct xenvif *vif) 719 void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
658{ 720{
659 int more_to_do; 721 int more_to_do;
660 722
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
688 { 750 {
689 struct xenvif *vif = (struct xenvif *)data; 751 struct xenvif *vif = (struct xenvif *)data;
690 tx_add_credit(vif); 752 tx_add_credit(vif);
691 xenvif_check_rx_xenvif(vif); 753 xenvif_napi_schedule_or_enable_events(vif);
692 } 754 }
693 755
694 static void xenvif_tx_err(struct xenvif *vif, 756 static void xenvif_tx_err(struct xenvif *vif,