Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c                      |  60
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c      |   1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h  | 110
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c     | 147
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h      |   8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h            |   6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c           |  26
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c      | 164
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c                  |  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h         |   5
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c                 |   3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c    |   6
-rw-r--r--  drivers/net/phy/icplus.c                             |   3
-rw-r--r--  drivers/net/ppp/ppp_generic.c                        |  14
14 files changed, 371 insertions(+), 195 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 941b4e189adf..62d2409bb293 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2034,6 +2034,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	write_unlock_bh(&bond->lock);
 	unblock_netpoll_tx();
 
+	if (bond->slave_cnt == 0)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
+
 	bond_compute_features(bond);
 	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
 	    (old_features & NETIF_F_VLAN_CHALLENGED))
@@ -3007,7 +3010,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
 			    trans_start + delta_in_ticks)) ||
 			    bond->curr_active_slave != slave) {
 				slave->link = BOND_LINK_UP;
-				bond->current_arp_slave = NULL;
+				if (bond->current_arp_slave) {
+					bond_set_slave_inactive_flags(
+						bond->current_arp_slave);
+					bond->current_arp_slave = NULL;
+				}
 
 				pr_info("%s: link status definitely up for interface %s.\n",
 					bond->dev->name, slave->dev->name);
@@ -3701,17 +3708,52 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
 	read_unlock(&bond->lock);
 }
 
-static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms)
+static int bond_neigh_init(struct neighbour *n)
 {
-	struct bonding *bond = netdev_priv(dev);
+	struct bonding *bond = netdev_priv(n->dev);
 	struct slave *slave = bond->first_slave;
+	const struct net_device_ops *slave_ops;
+	struct neigh_parms parms;
+	int ret;
+
+	if (!slave)
+		return 0;
+
+	slave_ops = slave->dev->netdev_ops;
+
+	if (!slave_ops->ndo_neigh_setup)
+		return 0;
+
+	parms.neigh_setup = NULL;
+	parms.neigh_cleanup = NULL;
+	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
+	if (ret)
+		return ret;
+
+	/*
+	 * Assign slave's neigh_cleanup to neighbour in case cleanup is called
+	 * after the last slave has been detached. Assumes that all slaves
+	 * utilize the same neigh_cleanup (true at this writing as only user
+	 * is ipoib).
+	 */
+	n->parms->neigh_cleanup = parms.neigh_cleanup;
+
+	if (!parms.neigh_setup)
+		return 0;
+
+	return parms.neigh_setup(n);
+}
+
+/*
+ * The bonding ndo_neigh_setup is called at init time beofre any
+ * slave exists. So we must declare proxy setup function which will
+ * be used at run time to resolve the actual slave neigh param setup.
+ */
+static int bond_neigh_setup(struct net_device *dev,
+			    struct neigh_parms *parms)
+{
+	parms->neigh_setup = bond_neigh_init;
 
-	if (slave) {
-		const struct net_device_ops *slave_ops
-			= slave->dev->netdev_ops;
-		if (slave_ops->ndo_neigh_setup)
-			return slave_ops->ndo_neigh_setup(slave->dev, parms);
-	}
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 44556b719e81..4b054812713a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1874,7 +1874,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		 * bnx2x_periodic_task().
 		 */
 		smp_mb();
-		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
 	} else
 		bp->port.pmf = 0;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index cd6dfa9eaa3a..b9b263323436 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -25,31 +25,31 @@
 	(IRO[149].base + ((funcId) * IRO[149].m1))
 #define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
 #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[315].base + ((pfId) * IRO[315].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
 	(IRO[316].base + ((pfId) * IRO[316].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[317].base + ((pfId) * IRO[317].m1))
 #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
-	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
 #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
-	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
 #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
-	(IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
+	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
 #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
-	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+	(IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
 #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
-	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
 #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[314].base + ((pfId) * IRO[314].m1))
+	(IRO[315].base + ((pfId) * IRO[315].m1))
 #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[306].base + ((pfId) * IRO[306].m1))
+	(IRO[307].base + ((pfId) * IRO[307].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[305].base + ((pfId) * IRO[305].m1))
+	(IRO[306].base + ((pfId) * IRO[306].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[304].base + ((pfId) * IRO[304].m1))
+	(IRO[305].base + ((pfId) * IRO[305].m1))
 #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
 	(IRO[151].base + ((funcId) * IRO[151].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -96,37 +96,37 @@
 #define TSTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[103].base + ((funcId) * IRO[103].m1))
 #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[271].base + ((pfId) * IRO[271].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
 	(IRO[272].base + ((pfId) * IRO[272].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
 	(IRO[273].base + ((pfId) * IRO[273].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
 	(IRO[274].base + ((pfId) * IRO[274].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+	(IRO[275].base + ((pfId) * IRO[275].m1))
 #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[270].base + ((pfId) * IRO[270].m1))
+	(IRO[271].base + ((pfId) * IRO[271].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[269].base + ((pfId) * IRO[269].m1))
+	(IRO[270].base + ((pfId) * IRO[270].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[268].base + ((pfId) * IRO[268].m1))
+	(IRO[269].base + ((pfId) * IRO[269].m1))
 #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
-	(IRO[267].base + ((pfId) * IRO[267].m1))
+	(IRO[268].base + ((pfId) * IRO[268].m1))
 #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
-	(IRO[276].base + ((pfId) * IRO[276].m1))
+	(IRO[277].base + ((pfId) * IRO[277].m1))
 #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[263].base + ((pfId) * IRO[263].m1))
-#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
 	(IRO[264].base + ((pfId) * IRO[264].m1))
-#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
 	(IRO[265].base + ((pfId) * IRO[265].m1))
-#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
 	(IRO[266].base + ((pfId) * IRO[266].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[267].base + ((pfId) * IRO[267].m1))
 #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
 	(IRO[202].base + ((pfId) * IRO[202].m1))
 #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
 	(IRO[105].base + ((funcId) * IRO[105].m1))
 #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
-	(IRO[216].base + ((pfId) * IRO[216].m1))
+	(IRO[217].base + ((pfId) * IRO[217].m1))
 #define TSTORM_VF_TO_PF_OFFSET(funcId) \
 	(IRO[104].base + ((funcId) * IRO[104].m1))
 #define USTORM_AGG_DATA_OFFSET (IRO[206].base)
@@ -140,29 +140,29 @@
 #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
 	(IRO[183].base + ((portId) * IRO[183].m1))
 #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
-	(IRO[317].base + ((pfId) * IRO[317].m1))
+	(IRO[318].base + ((pfId) * IRO[318].m1))
 #define USTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[178].base + ((funcId) * IRO[178].m1))
 #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[281].base + ((pfId) * IRO[281].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
 	(IRO[282].base + ((pfId) * IRO[282].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[283].base + ((pfId) * IRO[283].m1))
 #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[286].base + ((pfId) * IRO[286].m1))
+	(IRO[287].base + ((pfId) * IRO[287].m1))
 #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
-	(IRO[283].base + ((pfId) * IRO[283].m1))
+	(IRO[284].base + ((pfId) * IRO[284].m1))
 #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[279].base + ((pfId) * IRO[279].m1))
+	(IRO[280].base + ((pfId) * IRO[280].m1))
 #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[278].base + ((pfId) * IRO[278].m1))
+	(IRO[279].base + ((pfId) * IRO[279].m1))
 #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[277].base + ((pfId) * IRO[277].m1))
+	(IRO[278].base + ((pfId) * IRO[278].m1))
 #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[280].base + ((pfId) * IRO[280].m1))
+	(IRO[281].base + ((pfId) * IRO[281].m1))
 #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
-	(IRO[284].base + ((pfId) * IRO[284].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
 	(IRO[285].base + ((pfId) * IRO[285].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[286].base + ((pfId) * IRO[286].m1))
 #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
 	(IRO[182].base + ((pfId) * IRO[182].m1))
 #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -188,39 +188,39 @@
 #define XSTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[47].base + ((funcId) * IRO[47].m1))
 #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[294].base + ((pfId) * IRO[294].m1))
+	(IRO[295].base + ((pfId) * IRO[295].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
-	(IRO[297].base + ((pfId) * IRO[297].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
 	(IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
 	(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
 	(IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
 	(IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
 	(IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
 	(IRO[303].base + ((pfId) * IRO[303].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+	(IRO[304].base + ((pfId) * IRO[304].m1))
 #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[293].base + ((pfId) * IRO[293].m1))
+	(IRO[294].base + ((pfId) * IRO[294].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[292].base + ((pfId) * IRO[292].m1))
+	(IRO[293].base + ((pfId) * IRO[293].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[291].base + ((pfId) * IRO[291].m1))
+	(IRO[292].base + ((pfId) * IRO[292].m1))
 #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[296].base + ((pfId) * IRO[296].m1))
+	(IRO[297].base + ((pfId) * IRO[297].m1))
 #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
-	(IRO[295].base + ((pfId) * IRO[295].m1))
+	(IRO[296].base + ((pfId) * IRO[296].m1))
 #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
-	(IRO[290].base + ((pfId) * IRO[290].m1))
+	(IRO[291].base + ((pfId) * IRO[291].m1))
 #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[289].base + ((pfId) * IRO[289].m1))
+	(IRO[290].base + ((pfId) * IRO[290].m1))
 #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
-	(IRO[288].base + ((pfId) * IRO[288].m1))
+	(IRO[289].base + ((pfId) * IRO[289].m1))
 #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
-	(IRO[287].base + ((pfId) * IRO[287].m1))
+	(IRO[288].base + ((pfId) * IRO[288].m1))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
 	(IRO[44].base + ((pfId) * IRO[44].m1))
 #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index efa557b76ac7..ad95324dc042 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1371,7 +1371,14 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
 		pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
 			XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
 			XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
-			XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
+			XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
+			XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+		/* Write pause and PFC registers */
+		REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+		pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+
 	}
 
 	/* Write pause and PFC registers */
@@ -3648,6 +3655,33 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
 		bnx2x_cl22_read(bp, phy, 0x4, &ld_pause);
 		bnx2x_cl22_read(bp, phy, 0x5, &lp_pause);
+	} else if (CHIP_IS_E3(bp) &&
+		   SINGLE_MEDIA_DIRECT(params)) {
+		u8 lane = bnx2x_get_warpcore_lane(phy, params);
+		u16 gp_status, gp_mask;
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4,
+				&gp_status);
+		gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL |
+			   MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) <<
+			  lane;
+		if ((gp_status & gp_mask) == gp_mask) {
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+		} else {
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+			ld_pause = ((ld_pause &
+				     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+				    << 3);
+			lp_pause = ((lp_pause &
+				     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+				    << 3);
+		}
 	} else {
 		bnx2x_cl45_read(bp, phy,
 				MDIO_AN_DEVAD,
@@ -3698,7 +3732,23 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	u16 val16 = 0, lane, bam37 = 0;
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
-
+	/* Set to default registers that may be overriden by 10G force */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_CONTROL, 0x7415);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
 	/* Disable Autoneg: re-enable it after adv is done. */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
@@ -3944,13 +3994,13 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
 
 	} else {
 		misc1_val |= 0x9;
-		tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
-			   (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
-			   (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+		tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+			   (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+			   (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
 		tx_driver_val =
-		      ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+		      ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
 		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
-		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+		       (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
 	}
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4368,7 +4418,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
 	switch (serdes_net_if) {
 	case PORT_HW_CFG_NET_SERDES_IF_KR:
 		/* Enable KR Auto Neg */
-		if (params->loopback_mode == LOOPBACK_NONE)
+		if (params->loopback_mode != LOOPBACK_EXT)
 			bnx2x_warpcore_enable_AN_KR(phy, params, vars);
 		else {
 			DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
@@ -6166,12 +6216,14 @@ int bnx2x_set_led(struct link_params *params,
 
 		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
 		if (params->phy[EXT_PHY1].type ==
 		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
-			EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp & 0xfff1);
-		else {
-			EMAC_WR(bp, EMAC_REG_EMAC_LED,
-				(tmp | EMAC_LED_OVERRIDE));
-		}
+			tmp &= ~(EMAC_LED_1000MB_OVERRIDE |
+				 EMAC_LED_100MB_OVERRIDE |
+				 EMAC_LED_10MB_OVERRIDE);
+		else
+			tmp |= EMAC_LED_OVERRIDE;
+
+		EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp);
 		break;
 
 	case LED_MODE_OPER:
@@ -6226,10 +6278,15 @@ int bnx2x_set_led(struct link_params *params,
 				       hw_led_mode);
 		} else if ((params->phy[EXT_PHY1].type ==
 			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
-			   (mode != LED_MODE_OPER)) {
+			   (mode == LED_MODE_ON)) {
 			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
 			tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-			EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | 0x3);
+			EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp |
+				EMAC_LED_OVERRIDE | EMAC_LED_1000MB_OVERRIDE);
+			/* Break here; otherwise, it'll disable the
+			 * intended override.
+			 */
+			break;
 		} else
 			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
 			       hw_led_mode);
@@ -6244,13 +6301,9 @@ int bnx2x_set_led(struct link_params *params,
 				LED_BLINK_RATE_VAL_E1X_E2);
 			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
 			       port*4, 1);
-			if ((params->phy[EXT_PHY1].type !=
-			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
-			    (mode != LED_MODE_OPER)) {
-				tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-				EMAC_WR(bp, EMAC_REG_EMAC_LED,
-					(tmp & (~EMAC_LED_OVERRIDE)));
-			}
+			tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+			EMAC_WR(bp, EMAC_REG_EMAC_LED,
+				(tmp & (~EMAC_LED_OVERRIDE)));
 
 			if (CHIP_IS_E1(bp) &&
 			    ((speed == SPEED_2500) ||
@@ -6843,6 +6896,12 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 			 SINGLE_MEDIA_DIRECT(params)) &&
 			(phy_vars[active_external_phy].fault_detected == 0));
 
+	/* Update the PFC configuration in case it was changed */
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		vars->link_status |= LINK_STATUS_PFC_ENABLED;
+	else
+		vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
 	if (vars->link_up)
 		rc = bnx2x_update_link_up(params, vars, link_10g_plus);
 	else
@@ -8030,7 +8089,9 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
 	netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
 			    " Port %d from %s part number %s\n",
 		   params->port, vendor_name, vendor_pn);
-	phy->flags |= FLAGS_SFP_NOT_APPROVED;
+	if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
+	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG)
+		phy->flags |= FLAGS_SFP_NOT_APPROVED;
 	return -EINVAL;
 }
 
@@ -9090,6 +9151,12 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
 		tmp2 &= 0xFFEF;
 		bnx2x_cl45_write(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+		bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+			&tmp2);
+		bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+			(tmp2 & 0x7fff));
 	}
 
 	return 0;
@@ -9270,12 +9337,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
 				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 				 ((1<<5) | (1<<2)));
 	}
-	DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
-	bnx2x_8727_specific_func(phy, params, ENABLE_TX);
-	/* If transmitter is disabled, ignore false link up indication */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
-	if (val1 & (1<<15)) {
+
+	if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+		DP(NETIF_MSG_LINK, "Enabling 8727 TX laser\n");
+		bnx2x_sfp_set_transmitter(params, phy, 1);
+	} else {
 		DP(NETIF_MSG_LINK, "Tx is disabled\n");
 		return 0;
 	}
@@ -9369,8 +9435,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
-		bnx2x_save_spirom_version(bp, port,
-				((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f),
+		bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
 				phy->ver_addr);
 	} else {
 		/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
@@ -9793,6 +9858,15 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
 			other_shmem_base_addr));
 
 	u32 shmem_base_path[2];
+
+	/* Work around for 84833 LED failure inside RESET status */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+			MDIO_AN_REG_8481_MII_CTRL_FORCE_1G);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			MDIO_AN_REG_8481_1G_100T_EXT_CTRL,
+			MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF);
+
 	shmem_base_path[0] = params->shmem_base;
 	shmem_base_path[1] = other_shmem_base_addr;
 
@@ -10103,7 +10177,7 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
 	u8 port;
 	u16 val16;
 
-	if (!(CHIP_IS_E1(bp)))
+	if (!(CHIP_IS_E1x(bp)))
 		port = BP_PATH(bp);
 	else
 		port = params->port;
@@ -10130,7 +10204,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 	u16 val;
 	u8 port;
 
-	if (!(CHIP_IS_E1(bp)))
+	if (!(CHIP_IS_E1x(bp)))
 		port = BP_PATH(bp);
 	else
 		port = params->port;
@@ -12049,6 +12123,9 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 
 	bnx2x_emac_init(params, vars);
 
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		vars->link_status |= LINK_STATUS_PFC_ENABLED;
+
 	if (params->num_phys == 0) {
 		DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
 		return -EINVAL;
@@ -12128,10 +12205,10 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
 	 * Hold it as vars low
 	 */
 	/* clear link led */
+	bnx2x_set_mdio_clk(bp, params->chip_id, port);
 	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
 
 	if (reset_ext_phy) {
-		bnx2x_set_mdio_clk(bp, params->chip_id, port);
 		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 		      phy_index++) {
 			if (params->phy[phy_index].link_reset) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index ab0a250f95fa..c25803b9c0ca 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -5354,6 +5354,7 @@
 #define XMAC_CTRL_REG_TX_EN				(0x1<<0)
 #define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN			(0x1<<18)
 #define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN			(0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON		(0x1<<1)
 #define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN		(0x1<<0)
 #define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN		(0x1<<3)
 #define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN			(0x1<<4)
@@ -6820,10 +6821,13 @@ The other bits are reserved and should be zero*/
 
 #define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL	0x0020
 #define MDIO_AN_REG_8481_LEGACY_MII_CTRL	0xffe0
+#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G	0x40
 #define MDIO_AN_REG_8481_LEGACY_MII_STATUS	0xffe1
 #define MDIO_AN_REG_8481_LEGACY_AN_ADV		0xffe4
 #define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION	0xffe6
 #define MDIO_AN_REG_8481_1000T_CTRL		0xffe9
+#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL	0xfff0
+#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF	0x0008
 #define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW	0xfff5
 #define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS	0xfff7
 #define MDIO_AN_REG_8481_AUX_CTRL		0xfff8
@@ -6943,6 +6947,10 @@ The other bits are reserved and should be zero*/
 #define MDIO_WC_REG_GP2_STATUS_GP_2_2			0x81d2
 #define MDIO_WC_REG_GP2_STATUS_GP_2_3			0x81d3
 #define MDIO_WC_REG_GP2_STATUS_GP_2_4			0x81d4
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL	0x1000
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL	0x0100
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP	0x0010
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP	0x1
 #define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP		0x81EE
 #define MDIO_WC_REG_UC_INFO_B1_VERSION			0x81F0
 #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE		0x81F2
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 86cdd4793992..b83897f76ee3 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -161,6 +161,12 @@ struct e1000_info;
 /* Time to wait before putting the device into D3 if there's no link (in ms). */
 #define LINK_TIMEOUT		100
 
+/*
+ * Count for polling __E1000_RESET condition every 10-20msec.
+ * Experimentation has shown the reset can take approximately 210msec.
+ */
+#define E1000_CHECK_RESET_COUNT	25
+
 #define DEFAULT_RDTR			0
 #define DEFAULT_RADV			8
 #define BURST_RDTR			0x20
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 2c38a65ade87..19ab2154802c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1059,6 +1059,13 @@ static void e1000_print_hw_hang(struct work_struct *work)
 		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
 		/* execute the writes immediately */
 		e1e_flush();
+		/*
+		 * Due to rare timing issues, write to TIDV again to ensure
+		 * the write is successful
+		 */
+		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+		/* execute the writes immediately */
+		e1e_flush();
 		adapter->tx_hang_recheck = true;
 		return;
 	}
@@ -3616,6 +3623,16 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
 
 	/* execute the writes immediately */
 	e1e_flush();
+
+	/*
+	 * due to rare timing issues, write to TIDV/RDTR again to ensure the
+	 * write is successful
+	 */
+	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+	/* execute the writes immediately */
+	e1e_flush();
 }
 
 static void e1000e_update_stats(struct e1000_adapter *adapter);
@@ -3968,6 +3985,10 @@ static int e1000_close(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct pci_dev *pdev = adapter->pdev;
+	int count = E1000_CHECK_RESET_COUNT;
+
+	while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+		usleep_range(10000, 20000);
 
 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
 
@@ -5472,6 +5493,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 	netif_device_detach(netdev);
 
 	if (netif_running(netdev)) {
+		int count = E1000_CHECK_RESET_COUNT;
+
+		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+			usleep_range(10000, 20000);
+
 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
 		e1000e_down(adapter);
 		e1000_free_irq(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index dde65f951400..652e4b09546d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -44,62 +44,94 @@
 #define DCB_NO_HW_CHG   1  /* DCB configuration did not change */
 #define DCB_HW_CHG      2  /* DCB configuration changed, no reset */
 
-int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
-		       struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
+int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg,
+		       struct ixgbe_dcb_config *dcfg, int tc_max)
 {
-	struct tc_configuration *src_tc_cfg = NULL;
-	struct tc_configuration *dst_tc_cfg = NULL;
-	int i;
+	struct tc_configuration *src = NULL;
+	struct tc_configuration *dst = NULL;
+	int i, j;
+	int tx = DCB_TX_CONFIG;
+	int rx = DCB_RX_CONFIG;
+	int changes = 0;
 
-	if (!src_dcb_cfg || !dst_dcb_cfg)
-		return -EINVAL;
+	if (!scfg || !dcfg)
+		return changes;
 
 	for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
-		src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
-		dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
+		src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
+		dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0];
 
-		dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
-				src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
+		if (dst->path[tx].prio_type != src->path[tx].prio_type) {
+			dst->path[tx].prio_type = src->path[tx].prio_type;
+			changes |= BIT_PG_TX;
+		}
 
-		dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
-				src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
+		if (dst->path[tx].bwg_id != src->path[tx].bwg_id) {
+			dst->path[tx].bwg_id = src->path[tx].bwg_id;
+			changes |= BIT_PG_TX;
+		}
 
-		dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
-				src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
+		if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) {
+			dst->path[tx].bwg_percent = src->path[tx].bwg_percent;
+			changes |= BIT_PG_TX;
+		}
 
-		dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
-				src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
+		if (dst->path[tx].up_to_tc_bitmap !=
+		    src->path[tx].up_to_tc_bitmap) {
+			dst->path[tx].up_to_tc_bitmap =
+				src->path[tx].up_to_tc_bitmap;
+			changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG);
+		}
 
-		dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
-				src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
+		if (dst->path[rx].prio_type != src->path[rx].prio_type) {
+			dst->path[rx].prio_type = src->path[rx].prio_type;
+			changes |= BIT_PG_RX;
+		}
 
-		dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
-				src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
+		if (dst->path[rx].bwg_id != src->path[rx].bwg_id) {
+			dst->path[rx].bwg_id = src->path[rx].bwg_id;
+			changes |= BIT_PG_RX;
+		}
 
-		dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
-				src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
+		if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) {
+			dst->path[rx].bwg_percent = src->path[rx].bwg_percent;
+			changes |= BIT_PG_RX;
+		}
 
-		dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
-				src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
+		if (dst->path[rx].up_to_tc_bitmap !=
+		    src->path[rx].up_to_tc_bitmap) {
+			dst->path[rx].up_to_tc_bitmap =
+				src->path[rx].up_to_tc_bitmap;
+			changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG);
+		}
 	}
 
 	for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
-		dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
-			[i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
-				[DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
-		dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
-			[i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
-				[DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
+		j = i - DCB_PG_ATTR_BW_ID_0;
+		if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) {
+			dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j];
+			changes |= BIT_PG_TX;
+		}
+		if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) {
+			dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j];
+			changes |= BIT_PG_RX;
+		}
 	}
 
 	for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
-		dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
-			src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
+		j = i - DCB_PFC_UP_ATTR_0;
+		if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) {
+			dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc;
+			changes |= BIT_PFC;
+		}
 	}
 
-	dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable;
+	if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) {
+		dcfg->pfc_mode_enable = scfg->pfc_mode_enable;
+		changes |= BIT_PFC;
+	}
 
-	return 0;
+	return changes;
 }
 
 static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
@@ -179,20 +211,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 	if (up_map != DCB_ATTR_VALUE_UNDEFINED)
 		adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
 			up_map;
-
-	if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
-	     adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
-	     adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
-	     adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-	     adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
-		adapter->dcb_set_bitmap |= BIT_PG_TX;
-
-	if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-	    adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
-		adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -201,10 +219,6 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
-
-	if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
-	    adapter->dcb_cfg.bw_percentage[0][bwg_id])
-		adapter->dcb_set_bitmap |= BIT_PG_TX;
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -223,20 +237,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 	if (up_map != DCB_ATTR_VALUE_UNDEFINED)
 		adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
 			up_map;
-
-	if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
-	     adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
-	     adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
-	     adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
-	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-	     adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
-		adapter->dcb_set_bitmap |= BIT_PG_RX;
-
-	if (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-	    adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)
-		adapter->dcb_set_bitmap |= BIT_PFC;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -245,10 +245,6 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
-
-	if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
-	    adapter->dcb_cfg.bw_percentage[1][bwg_id])
-		adapter->dcb_set_bitmap |= BIT_PG_RX;
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -298,10 +294,8 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 
 	adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
 	if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
-	    adapter->dcb_cfg.tc_config[priority].dcb_pfc) {
-		adapter->dcb_set_bitmap |= BIT_PFC;
+	    adapter->dcb_cfg.tc_config[priority].dcb_pfc)
 		adapter->temp_dcb_cfg.pfc_mode_enable = true;
-	}
 }
 
 static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -336,7 +330,8 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	int ret, i;
+	int ret = DCB_NO_HW_CHG;
+	int i;
 #ifdef IXGBE_FCOE
 	struct dcb_app app = {
 			      .selector = DCB_APP_IDTYPE_ETHTYPE,
@@ -355,12 +350,13 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 
 	/* Fail command if not in CEE mode */
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-		return 1;
+		return ret;
 
-	ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
-				 MAX_TRAFFIC_CLASS);
-	if (ret)
-		return DCB_NO_HW_CHG;
+	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
+						      &adapter->dcb_cfg,
+						      MAX_TRAFFIC_CLASS);
+	if (!adapter->dcb_set_bitmap)
+		return ret;
 
 	if (adapter->dcb_cfg.pfc_mode_enable) {
 		switch (adapter->hw.mac.type) {
@@ -420,6 +416,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 
 		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
 			netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
+
+		ret = DCB_HW_CHG_RST;
 	}
 
 	if (adapter->dcb_set_bitmap & BIT_PFC) {
@@ -430,7 +428,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 					DCB_TX_CONFIG, prio_tc);
 		ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
 		ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
-		ret = DCB_HW_CHG;
+		if (ret != DCB_HW_CHG_RST)
+			ret = DCB_HW_CHG;
 	}
 
 	if (adapter->dcb_cfg.pfc_mode_enable)
@@ -531,9 +530,6 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	adapter->temp_dcb_cfg.pfc_mode_enable = state;
-	if (adapter->temp_dcb_cfg.pfc_mode_enable !=
-	    adapter->dcb_cfg.pfc_mode_enable)
-		adapter->dcb_set_bitmap |= BIT_PFC;
 }
 
 /**
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index b806d9b4defb..c9b504e2dfc3 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2469,6 +2469,17 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 	return err;
 }
 
+static inline bool needs_copy(const struct rx_ring_info *re,
+			      unsigned length)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	/* Some architectures need the IP header to be aligned */
+	if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32)))
+		return true;
+#endif
+	return length < copybreak;
+}
+
 /* For small just reuse existing skb for next receive */
 static struct sk_buff *receive_copy(struct sky2_port *sky2,
 				    const struct rx_ring_info *re,
@@ -2599,7 +2610,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
 		goto error;
 
 okay:
-	if (length < copybreak)
+	if (needs_copy(re, length))
 		skb = receive_copy(sky2, re, length);
 	else
 		skb = receive_new(sky2, re, length);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9e2b911a1230..d69fee41f24a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -83,8 +83,9 @@
 
 #define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)
 
-#define MLX4_EN_ALLOC_ORDER	2
-#define MLX4_EN_ALLOC_SIZE	(PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
+/* Use the maximum between 16384 and a single page */
+#define MLX4_EN_ALLOC_SIZE	PAGE_ALIGN(16384)
+#define MLX4_EN_ALLOC_ORDER	get_order(MLX4_EN_ALLOC_SIZE)
 
 #define MLX4_EN_MAX_LRO_DESCRIPTORS	32
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7b23554f80b6..f54509377efa 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5810,7 +5810,10 @@ static void __rtl8169_resume(struct net_device *dev)
 
 	rtl_pll_power_up(tp);
 
+	rtl_lock_work(tp);
+	napi_enable(&tp->napi);
 	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+	rtl_unlock_work(tp);
 
 	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e85ffbd54830..48d56da62f08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1737,10 +1737,12 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	struct mac_device_info *mac;
 
 	/* Identify the MAC HW device */
-	if (priv->plat->has_gmac)
+	if (priv->plat->has_gmac) {
+		priv->dev->priv_flags |= IFF_UNICAST_FLT;
 		mac = dwmac1000_setup(priv->ioaddr);
-	else
+	} else {
 		mac = dwmac100_setup(priv->ioaddr);
+	}
 	if (!mac)
 		return -ENOMEM;
 
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 0856e1b7a849..f08c85acf761 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -162,7 +162,8 @@ static int ip101a_g_config_init(struct phy_device *phydev)
 	/* Enable Auto Power Saving mode */
 	c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
 	c |= IP101A_G_APS_ON;
-	return c;
+
+	return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
 }
 
 static int ip175c_read_status(struct phy_device *phydev)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 159da2905fe9..33f8c51968b6 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -235,7 +235,7 @@ struct ppp_net {
 /* Prototypes. */
 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
 			struct file *file, unsigned int cmd, unsigned long arg);
-static void ppp_xmit_process(struct ppp *ppp);
+static int ppp_xmit_process(struct ppp *ppp);
 static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
 static void ppp_push(struct ppp *ppp);
 static void ppp_channel_push(struct channel *pch);
@@ -968,9 +968,9 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	proto = npindex_to_proto[npi];
 	put_unaligned_be16(proto, pp);
 
-	netif_stop_queue(dev);
 	skb_queue_tail(&ppp->file.xq, skb);
-	ppp_xmit_process(ppp);
+	if (!ppp_xmit_process(ppp))
+		netif_stop_queue(dev);
 	return NETDEV_TX_OK;
 
  outf:
@@ -1048,10 +1048,11 @@ static void ppp_setup(struct net_device *dev)
  * Called to do any work queued up on the transmit side
  * that can now be done.
  */
-static void
+static int
 ppp_xmit_process(struct ppp *ppp)
 {
 	struct sk_buff *skb;
+	int ret = 0;
 
 	ppp_xmit_lock(ppp);
 	if (!ppp->closing) {
@@ -1061,10 +1062,13 @@ ppp_xmit_process(struct ppp *ppp)
 			ppp_send_frame(ppp, skb);
 		/* If there's no work left to do, tell the core net
 		   code that we can accept some more. */
-		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
+		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) {
 			netif_wake_queue(ppp->dev);
+			ret = 1;
+		}
 	}
 	ppp_xmit_unlock(ppp);
+	return ret;
 }
 
 static inline struct sk_buff *