Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 12
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.h | 3
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atlx.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 41
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 110
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 160
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 464
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 40
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 15
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 33
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 7
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 164
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 33
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 5
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.c | 21
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_mll.c | 2
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 10
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 3
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 5
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 22
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 15
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 17
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c | 77
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 12
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h | 4
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 6
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 6
48 files changed, 978 insertions, 442 deletions
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 40ac41436549..c926857e8205 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2476,7 +2476,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
2476 "pcie phy link down %x\n", status); 2476 "pcie phy link down %x\n", status);
2477 if (netif_running(adapter->netdev)) { /* reset MAC */ 2477 if (netif_running(adapter->netdev)) { /* reset MAC */
2478 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 2478 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
2479 schedule_work(&adapter->pcie_dma_to_rst_task); 2479 schedule_work(&adapter->reset_dev_task);
2480 return IRQ_HANDLED; 2480 return IRQ_HANDLED;
2481 } 2481 }
2482 } 2482 }
@@ -2488,7 +2488,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
2488 "pcie DMA r/w error (status = 0x%x)\n", 2488 "pcie DMA r/w error (status = 0x%x)\n",
2489 status); 2489 status);
2490 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 2490 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
2491 schedule_work(&adapter->pcie_dma_to_rst_task); 2491 schedule_work(&adapter->reset_dev_task);
2492 return IRQ_HANDLED; 2492 return IRQ_HANDLED;
2493 } 2493 }
2494 2494
@@ -2633,10 +2633,10 @@ static void atl1_down(struct atl1_adapter *adapter)
2633 atl1_clean_rx_ring(adapter); 2633 atl1_clean_rx_ring(adapter);
2634} 2634}
2635 2635
2636static void atl1_tx_timeout_task(struct work_struct *work) 2636static void atl1_reset_dev_task(struct work_struct *work)
2637{ 2637{
2638 struct atl1_adapter *adapter = 2638 struct atl1_adapter *adapter =
2639 container_of(work, struct atl1_adapter, tx_timeout_task); 2639 container_of(work, struct atl1_adapter, reset_dev_task);
2640 struct net_device *netdev = adapter->netdev; 2640 struct net_device *netdev = adapter->netdev;
2641 2641
2642 netif_device_detach(netdev); 2642 netif_device_detach(netdev);
@@ -3038,12 +3038,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
3038 (unsigned long)adapter); 3038 (unsigned long)adapter);
3039 adapter->phy_timer_pending = false; 3039 adapter->phy_timer_pending = false;
3040 3040
3041 INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); 3041 INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
3042 3042
3043 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); 3043 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
3044 3044
3045 INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
3046
3047 err = register_netdev(netdev); 3045 err = register_netdev(netdev);
3048 if (err) 3046 if (err)
3049 goto err_common; 3047 goto err_common;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 109d6da8be97..e04bf4d71e46 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -758,9 +758,8 @@ struct atl1_adapter {
758 u16 link_speed; 758 u16 link_speed;
759 u16 link_duplex; 759 u16 link_duplex;
760 spinlock_t lock; 760 spinlock_t lock;
761 struct work_struct tx_timeout_task; 761 struct work_struct reset_dev_task;
762 struct work_struct link_chg_task; 762 struct work_struct link_chg_task;
763 struct work_struct pcie_dma_to_rst_task;
764 763
765 struct timer_list phy_config_timer; 764 struct timer_list phy_config_timer;
766 bool phy_timer_pending; 765 bool phy_timer_pending;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 3cd8837236dc..c9e9dc57986c 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -194,7 +194,7 @@ static void atlx_tx_timeout(struct net_device *netdev)
194{ 194{
195 struct atlx_adapter *adapter = netdev_priv(netdev); 195 struct atlx_adapter *adapter = netdev_priv(netdev);
196 /* Do the reset outside of interrupt context */ 196 /* Do the reset outside of interrupt context */
197 schedule_work(&adapter->tx_timeout_task); 197 schedule_work(&adapter->reset_dev_task);
198} 198}
199 199
200/* 200/*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e37161f19250..2c9ee552dffc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1173,6 +1173,13 @@ enum {
1173}; 1173};
1174 1174
1175 1175
1176struct bnx2x_prev_path_list {
1177 u8 bus;
1178 u8 slot;
1179 u8 path;
1180 struct list_head list;
1181};
1182
1176struct bnx2x { 1183struct bnx2x {
1177 /* Fields used in the tx and intr/napi performance paths 1184 /* Fields used in the tx and intr/napi performance paths
1178 * are grouped together in the beginning of the structure 1185 * are grouped together in the beginning of the structure
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f1f3ca65667a..4b054812713a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1721,6 +1721,29 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
1721 } while (0) 1721 } while (0)
1722#endif 1722#endif
1723 1723
1724bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1725{
1726 /* build FW version dword */
1727 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1728 (BCM_5710_FW_MINOR_VERSION << 8) +
1729 (BCM_5710_FW_REVISION_VERSION << 16) +
1730 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1731
1732 /* read loaded FW from chip */
1733 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1734
1735 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1736
1737 if (loaded_fw != my_fw) {
1738 if (is_err)
1739 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1740 loaded_fw, my_fw);
1741 return false;
1742 }
1743
1744 return true;
1745}
1746
1724/* must be called with rtnl_lock */ 1747/* must be called with rtnl_lock */
1725int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 1748int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1726{ 1749{
@@ -1815,23 +1838,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1815 } 1838 }
1816 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && 1839 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
1817 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { 1840 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
1818 /* build FW version dword */
1819 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1820 (BCM_5710_FW_MINOR_VERSION << 8) +
1821 (BCM_5710_FW_REVISION_VERSION << 16) +
1822 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1823
1824 /* read loaded FW from chip */
1825 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1826
1827 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
1828 loaded_fw, my_fw);
1829
1830 /* abort nic load if version mismatch */ 1841 /* abort nic load if version mismatch */
1831 if (my_fw != loaded_fw) { 1842 if (!bnx2x_test_firmware_version(bp, true)) {
1832 BNX2X_ERR("bnx2x with FW %x already loaded, "
1833 "which mismatches my %x FW. aborting",
1834 loaded_fw, my_fw);
1835 rc = -EBUSY; 1843 rc = -EBUSY;
1836 LOAD_ERROR_EXIT(bp, load_error2); 1844 LOAD_ERROR_EXIT(bp, load_error2);
1837 } 1845 }
@@ -1866,7 +1874,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1866 * bnx2x_periodic_task(). 1874 * bnx2x_periodic_task().
1867 */ 1875 */
1868 smp_mb(); 1876 smp_mb();
1869 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1870 } else 1877 } else
1871 bp->port.pmf = 0; 1878 bp->port.pmf = 0;
1872 1879
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 8b163388659a..5c27454d2ec2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -431,6 +431,9 @@ void bnx2x_panic_dump(struct bnx2x *bp);
431 431
432void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); 432void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
433 433
 434/* validate correct fw is loaded */
435bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
436
434/* dev_close main block */ 437/* dev_close main block */
435int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); 438int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
436 439
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index cd6dfa9eaa3a..b9b263323436 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -25,31 +25,31 @@
25 (IRO[149].base + ((funcId) * IRO[149].m1)) 25 (IRO[149].base + ((funcId) * IRO[149].m1))
26#define CSTORM_IGU_MODE_OFFSET (IRO[157].base) 26#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
27#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ 27#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
28 (IRO[315].base + ((pfId) * IRO[315].m1))
29#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
30 (IRO[316].base + ((pfId) * IRO[316].m1)) 28 (IRO[316].base + ((pfId) * IRO[316].m1))
29#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
30 (IRO[317].base + ((pfId) * IRO[317].m1))
31#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ 31#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
32 (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2)) 32 (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
33#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ 33#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
34 (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) 34 (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
35#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ 35#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
36 (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) 36 (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
37#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ 37#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
38 (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) 38 (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
39#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ 39#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
40 (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2)) 40 (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
41#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ 41#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
42 (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) 42 (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
43#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ 43#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
44 (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) 44 (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
45#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ 45#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
46 (IRO[314].base + ((pfId) * IRO[314].m1)) 46 (IRO[315].base + ((pfId) * IRO[315].m1))
47#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ 47#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
48 (IRO[306].base + ((pfId) * IRO[306].m1)) 48 (IRO[307].base + ((pfId) * IRO[307].m1))
49#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ 49#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
50 (IRO[305].base + ((pfId) * IRO[305].m1)) 50 (IRO[306].base + ((pfId) * IRO[306].m1))
51#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ 51#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
52 (IRO[304].base + ((pfId) * IRO[304].m1)) 52 (IRO[305].base + ((pfId) * IRO[305].m1))
53#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ 53#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
54 (IRO[151].base + ((funcId) * IRO[151].m1)) 54 (IRO[151].base + ((funcId) * IRO[151].m1))
55#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ 55#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -96,37 +96,37 @@
96#define TSTORM_FUNC_EN_OFFSET(funcId) \ 96#define TSTORM_FUNC_EN_OFFSET(funcId) \
97 (IRO[103].base + ((funcId) * IRO[103].m1)) 97 (IRO[103].base + ((funcId) * IRO[103].m1))
98#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ 98#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
99 (IRO[271].base + ((pfId) * IRO[271].m1))
100#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
101 (IRO[272].base + ((pfId) * IRO[272].m1)) 99 (IRO[272].base + ((pfId) * IRO[272].m1))
102#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ 100#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
103 (IRO[273].base + ((pfId) * IRO[273].m1)) 101 (IRO[273].base + ((pfId) * IRO[273].m1))
104#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ 102#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
105 (IRO[274].base + ((pfId) * IRO[274].m1)) 103 (IRO[274].base + ((pfId) * IRO[274].m1))
104#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
105 (IRO[275].base + ((pfId) * IRO[275].m1))
106#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ 106#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
107 (IRO[270].base + ((pfId) * IRO[270].m1)) 107 (IRO[271].base + ((pfId) * IRO[271].m1))
108#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ 108#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
109 (IRO[269].base + ((pfId) * IRO[269].m1)) 109 (IRO[270].base + ((pfId) * IRO[270].m1))
110#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ 110#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
111 (IRO[268].base + ((pfId) * IRO[268].m1)) 111 (IRO[269].base + ((pfId) * IRO[269].m1))
112#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ 112#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
113 (IRO[267].base + ((pfId) * IRO[267].m1)) 113 (IRO[268].base + ((pfId) * IRO[268].m1))
114#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ 114#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
115 (IRO[276].base + ((pfId) * IRO[276].m1)) 115 (IRO[277].base + ((pfId) * IRO[277].m1))
116#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ 116#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
117 (IRO[263].base + ((pfId) * IRO[263].m1))
118#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
119 (IRO[264].base + ((pfId) * IRO[264].m1)) 117 (IRO[264].base + ((pfId) * IRO[264].m1))
120#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ 118#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
121 (IRO[265].base + ((pfId) * IRO[265].m1)) 119 (IRO[265].base + ((pfId) * IRO[265].m1))
122#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ 120#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
123 (IRO[266].base + ((pfId) * IRO[266].m1)) 121 (IRO[266].base + ((pfId) * IRO[266].m1))
122#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
123 (IRO[267].base + ((pfId) * IRO[267].m1))
124#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ 124#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
125 (IRO[202].base + ((pfId) * IRO[202].m1)) 125 (IRO[202].base + ((pfId) * IRO[202].m1))
126#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ 126#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
127 (IRO[105].base + ((funcId) * IRO[105].m1)) 127 (IRO[105].base + ((funcId) * IRO[105].m1))
128#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ 128#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
129 (IRO[216].base + ((pfId) * IRO[216].m1)) 129 (IRO[217].base + ((pfId) * IRO[217].m1))
130#define TSTORM_VF_TO_PF_OFFSET(funcId) \ 130#define TSTORM_VF_TO_PF_OFFSET(funcId) \
131 (IRO[104].base + ((funcId) * IRO[104].m1)) 131 (IRO[104].base + ((funcId) * IRO[104].m1))
132#define USTORM_AGG_DATA_OFFSET (IRO[206].base) 132#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
@@ -140,29 +140,29 @@
140#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ 140#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
141 (IRO[183].base + ((portId) * IRO[183].m1)) 141 (IRO[183].base + ((portId) * IRO[183].m1))
142#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ 142#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
143 (IRO[317].base + ((pfId) * IRO[317].m1)) 143 (IRO[318].base + ((pfId) * IRO[318].m1))
144#define USTORM_FUNC_EN_OFFSET(funcId) \ 144#define USTORM_FUNC_EN_OFFSET(funcId) \
145 (IRO[178].base + ((funcId) * IRO[178].m1)) 145 (IRO[178].base + ((funcId) * IRO[178].m1))
146#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ 146#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
147 (IRO[281].base + ((pfId) * IRO[281].m1))
148#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
149 (IRO[282].base + ((pfId) * IRO[282].m1)) 147 (IRO[282].base + ((pfId) * IRO[282].m1))
148#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
149 (IRO[283].base + ((pfId) * IRO[283].m1))
150#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ 150#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
151 (IRO[286].base + ((pfId) * IRO[286].m1)) 151 (IRO[287].base + ((pfId) * IRO[287].m1))
152#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ 152#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
153 (IRO[283].base + ((pfId) * IRO[283].m1)) 153 (IRO[284].base + ((pfId) * IRO[284].m1))
154#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ 154#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
155 (IRO[279].base + ((pfId) * IRO[279].m1)) 155 (IRO[280].base + ((pfId) * IRO[280].m1))
156#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ 156#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
157 (IRO[278].base + ((pfId) * IRO[278].m1)) 157 (IRO[279].base + ((pfId) * IRO[279].m1))
158#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ 158#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
159 (IRO[277].base + ((pfId) * IRO[277].m1)) 159 (IRO[278].base + ((pfId) * IRO[278].m1))
160#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ 160#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
161 (IRO[280].base + ((pfId) * IRO[280].m1)) 161 (IRO[281].base + ((pfId) * IRO[281].m1))
162#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ 162#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
163 (IRO[284].base + ((pfId) * IRO[284].m1))
164#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
165 (IRO[285].base + ((pfId) * IRO[285].m1)) 163 (IRO[285].base + ((pfId) * IRO[285].m1))
164#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
165 (IRO[286].base + ((pfId) * IRO[286].m1))
166#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ 166#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
167 (IRO[182].base + ((pfId) * IRO[182].m1)) 167 (IRO[182].base + ((pfId) * IRO[182].m1))
168#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ 168#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -188,39 +188,39 @@
188#define XSTORM_FUNC_EN_OFFSET(funcId) \ 188#define XSTORM_FUNC_EN_OFFSET(funcId) \
189 (IRO[47].base + ((funcId) * IRO[47].m1)) 189 (IRO[47].base + ((funcId) * IRO[47].m1))
190#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ 190#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
191 (IRO[294].base + ((pfId) * IRO[294].m1)) 191 (IRO[295].base + ((pfId) * IRO[295].m1))
192#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ 192#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
193 (IRO[297].base + ((pfId) * IRO[297].m1))
194#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
195 (IRO[298].base + ((pfId) * IRO[298].m1)) 193 (IRO[298].base + ((pfId) * IRO[298].m1))
196#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ 194#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
197 (IRO[299].base + ((pfId) * IRO[299].m1)) 195 (IRO[299].base + ((pfId) * IRO[299].m1))
198#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ 196#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
199 (IRO[300].base + ((pfId) * IRO[300].m1)) 197 (IRO[300].base + ((pfId) * IRO[300].m1))
200#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ 198#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
201 (IRO[301].base + ((pfId) * IRO[301].m1)) 199 (IRO[301].base + ((pfId) * IRO[301].m1))
202#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ 200#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
203 (IRO[302].base + ((pfId) * IRO[302].m1)) 201 (IRO[302].base + ((pfId) * IRO[302].m1))
204#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ 202#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
205 (IRO[303].base + ((pfId) * IRO[303].m1)) 203 (IRO[303].base + ((pfId) * IRO[303].m1))
204#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
205 (IRO[304].base + ((pfId) * IRO[304].m1))
206#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ 206#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
207 (IRO[293].base + ((pfId) * IRO[293].m1)) 207 (IRO[294].base + ((pfId) * IRO[294].m1))
208#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ 208#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
209 (IRO[292].base + ((pfId) * IRO[292].m1)) 209 (IRO[293].base + ((pfId) * IRO[293].m1))
210#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ 210#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
211 (IRO[291].base + ((pfId) * IRO[291].m1)) 211 (IRO[292].base + ((pfId) * IRO[292].m1))
212#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ 212#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
213 (IRO[296].base + ((pfId) * IRO[296].m1)) 213 (IRO[297].base + ((pfId) * IRO[297].m1))
214#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ 214#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
215 (IRO[295].base + ((pfId) * IRO[295].m1)) 215 (IRO[296].base + ((pfId) * IRO[296].m1))
216#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ 216#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
217 (IRO[290].base + ((pfId) * IRO[290].m1)) 217 (IRO[291].base + ((pfId) * IRO[291].m1))
218#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ 218#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
219 (IRO[289].base + ((pfId) * IRO[289].m1)) 219 (IRO[290].base + ((pfId) * IRO[290].m1))
220#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ 220#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
221 (IRO[288].base + ((pfId) * IRO[288].m1)) 221 (IRO[289].base + ((pfId) * IRO[289].m1))
222#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ 222#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
223 (IRO[287].base + ((pfId) * IRO[287].m1)) 223 (IRO[288].base + ((pfId) * IRO[288].m1))
224#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ 224#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
225 (IRO[44].base + ((pfId) * IRO[44].m1)) 225 (IRO[44].base + ((pfId) * IRO[44].m1))
226#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ 226#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5d71b7d43237..dbff5915b81a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1251,6 +1251,9 @@ struct drv_func_mb {
1251 1251
1252 #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000 1252 #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000
1253 1253
1254 #define DRV_MSG_CODE_INITIATE_FLR 0x02000000
1255 #define REQ_BC_VER_4_INITIATE_FLR 0x00070213
1256
1254 #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 1257 #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
1255 #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 1258 #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
1256 #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 1259 #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index beb4cdbdb6e1..64392ec410a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -35,7 +35,6 @@
35#define ETH_MAX_PACKET_SIZE 1500 35#define ETH_MAX_PACKET_SIZE 1500
36#define ETH_MAX_JUMBO_PACKET_SIZE 9600 36#define ETH_MAX_JUMBO_PACKET_SIZE 9600
37#define MDIO_ACCESS_TIMEOUT 1000 37#define MDIO_ACCESS_TIMEOUT 1000
38#define BMAC_CONTROL_RX_ENABLE 2
39#define WC_LANE_MAX 4 38#define WC_LANE_MAX 4
40#define I2C_SWITCH_WIDTH 2 39#define I2C_SWITCH_WIDTH 2
41#define I2C_BSC0 0 40#define I2C_BSC0 0
@@ -943,6 +942,12 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
943 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : 942 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
944 DCBX_E3B0_MAX_NUM_COS_PORT0; 943 DCBX_E3B0_MAX_NUM_COS_PORT0;
945 944
945 if (pri >= max_num_of_cos) {
946 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
947 "parameter Illegal strict priority\n");
948 return -EINVAL;
949 }
950
946 if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { 951 if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
947 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " 952 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
948 "parameter There can't be two COS's with " 953 "parameter There can't be two COS's with "
@@ -950,12 +955,6 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
950 return -EINVAL; 955 return -EINVAL;
951 } 956 }
952 957
953 if (pri > max_num_of_cos) {
954 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
955 "parameter Illegal strict priority\n");
956 return -EINVAL;
957 }
958
959 sp_pri_to_cos[pri] = cos_entry; 958 sp_pri_to_cos[pri] = cos_entry;
960 return 0; 959 return 0;
961 960
@@ -1372,7 +1371,14 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
1372 pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN | 1371 pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
1373 XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN | 1372 XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
1374 XMAC_PFC_CTRL_HI_REG_RX_PFC_EN | 1373 XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
1375 XMAC_PFC_CTRL_HI_REG_TX_PFC_EN; 1374 XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
1375 XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
1376 /* Write pause and PFC registers */
1377 REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
1378 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
1379 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
1380 pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
1381
1376 } 1382 }
1377 1383
1378 /* Write pause and PFC registers */ 1384 /* Write pause and PFC registers */
@@ -3649,6 +3655,33 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
3649 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) { 3655 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
3650 bnx2x_cl22_read(bp, phy, 0x4, &ld_pause); 3656 bnx2x_cl22_read(bp, phy, 0x4, &ld_pause);
3651 bnx2x_cl22_read(bp, phy, 0x5, &lp_pause); 3657 bnx2x_cl22_read(bp, phy, 0x5, &lp_pause);
3658 } else if (CHIP_IS_E3(bp) &&
3659 SINGLE_MEDIA_DIRECT(params)) {
3660 u8 lane = bnx2x_get_warpcore_lane(phy, params);
3661 u16 gp_status, gp_mask;
3662 bnx2x_cl45_read(bp, phy,
3663 MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4,
3664 &gp_status);
3665 gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL |
3666 MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) <<
3667 lane;
3668 if ((gp_status & gp_mask) == gp_mask) {
3669 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
3670 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
3671 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
3672 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
3673 } else {
3674 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
3675 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
3676 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
3677 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
3678 ld_pause = ((ld_pause &
3679 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
3680 << 3);
3681 lp_pause = ((lp_pause &
3682 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
3683 << 3);
3684 }
3652 } else { 3685 } else {
3653 bnx2x_cl45_read(bp, phy, 3686 bnx2x_cl45_read(bp, phy,
3654 MDIO_AN_DEVAD, 3687 MDIO_AN_DEVAD,
@@ -3699,7 +3732,23 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3699 u16 val16 = 0, lane, bam37 = 0; 3732 u16 val16 = 0, lane, bam37 = 0;
3700 struct bnx2x *bp = params->bp; 3733 struct bnx2x *bp = params->bp;
3701 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); 3734 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
3702 3735 /* Set to default registers that may be overridden by 10G force */
3736 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3737 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
3738 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3739 MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
3740 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3741 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
3742 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3743 MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
3744 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3745 MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
3746 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3747 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
3748 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3749 MDIO_WC_REG_RX66_CONTROL, 0x7415);
3750 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3751 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
3703 /* Disable Autoneg: re-enable it after adv is done. */ 3752 /* Disable Autoneg: re-enable it after adv is done. */
3704 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3753 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3705 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0); 3754 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
@@ -3945,13 +3994,13 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3945 3994
3946 } else { 3995 } else {
3947 misc1_val |= 0x9; 3996 misc1_val |= 0x9;
3948 tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | 3997 tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
3949 (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | 3998 (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
3950 (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); 3999 (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
3951 tx_driver_val = 4000 tx_driver_val =
3952 ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | 4001 ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
3953 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | 4002 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
3954 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); 4003 (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
3955 } 4004 }
3956 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4005 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3957 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); 4006 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4369,7 +4418,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4369 switch (serdes_net_if) { 4418 switch (serdes_net_if) {
4370 case PORT_HW_CFG_NET_SERDES_IF_KR: 4419 case PORT_HW_CFG_NET_SERDES_IF_KR:
4371 /* Enable KR Auto Neg */ 4420 /* Enable KR Auto Neg */
4372 if (params->loopback_mode == LOOPBACK_NONE) 4421 if (params->loopback_mode != LOOPBACK_EXT)
4373 bnx2x_warpcore_enable_AN_KR(phy, params, vars); 4422 bnx2x_warpcore_enable_AN_KR(phy, params, vars);
4374 else { 4423 else {
4375 DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n"); 4424 DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
@@ -6167,12 +6216,14 @@ int bnx2x_set_led(struct link_params *params,
6167 6216
6168 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 6217 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
6169 if (params->phy[EXT_PHY1].type == 6218 if (params->phy[EXT_PHY1].type ==
6170 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) 6219 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
6171 EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp & 0xfff1); 6220 tmp &= ~(EMAC_LED_1000MB_OVERRIDE |
6172 else { 6221 EMAC_LED_100MB_OVERRIDE |
6173 EMAC_WR(bp, EMAC_REG_EMAC_LED, 6222 EMAC_LED_10MB_OVERRIDE);
6174 (tmp | EMAC_LED_OVERRIDE)); 6223 else
6175 } 6224 tmp |= EMAC_LED_OVERRIDE;
6225
6226 EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp);
6176 break; 6227 break;
6177 6228
6178 case LED_MODE_OPER: 6229 case LED_MODE_OPER:
@@ -6227,10 +6278,15 @@ int bnx2x_set_led(struct link_params *params,
6227 hw_led_mode); 6278 hw_led_mode);
6228 } else if ((params->phy[EXT_PHY1].type == 6279 } else if ((params->phy[EXT_PHY1].type ==
6229 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) && 6280 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
6230 (mode != LED_MODE_OPER)) { 6281 (mode == LED_MODE_ON)) {
6231 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 6282 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
6232 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 6283 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
6233 EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | 0x3); 6284 EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp |
6285 EMAC_LED_OVERRIDE | EMAC_LED_1000MB_OVERRIDE);
6286 /* Break here; otherwise, it'll disable the
6287 * intended override.
6288 */
6289 break;
6234 } else 6290 } else
6235 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 6291 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
6236 hw_led_mode); 6292 hw_led_mode);
@@ -6245,13 +6301,9 @@ int bnx2x_set_led(struct link_params *params,
6245 LED_BLINK_RATE_VAL_E1X_E2); 6301 LED_BLINK_RATE_VAL_E1X_E2);
6246 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + 6302 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
6247 port*4, 1); 6303 port*4, 1);
6248 if ((params->phy[EXT_PHY1].type != 6304 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
6249 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) && 6305 EMAC_WR(bp, EMAC_REG_EMAC_LED,
6250 (mode != LED_MODE_OPER)) { 6306 (tmp & (~EMAC_LED_OVERRIDE)));
6251 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
6252 EMAC_WR(bp, EMAC_REG_EMAC_LED,
6253 (tmp & (~EMAC_LED_OVERRIDE)));
6254 }
6255 6307
6256 if (CHIP_IS_E1(bp) && 6308 if (CHIP_IS_E1(bp) &&
6257 ((speed == SPEED_2500) || 6309 ((speed == SPEED_2500) ||
@@ -6844,6 +6896,12 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6844 SINGLE_MEDIA_DIRECT(params)) && 6896 SINGLE_MEDIA_DIRECT(params)) &&
6845 (phy_vars[active_external_phy].fault_detected == 0)); 6897 (phy_vars[active_external_phy].fault_detected == 0));
6846 6898
6899 /* Update the PFC configuration in case it was changed */
6900 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
6901 vars->link_status |= LINK_STATUS_PFC_ENABLED;
6902 else
6903 vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
6904
6847 if (vars->link_up) 6905 if (vars->link_up)
6848 rc = bnx2x_update_link_up(params, vars, link_10g_plus); 6906 rc = bnx2x_update_link_up(params, vars, link_10g_plus);
6849 else 6907 else
@@ -8031,7 +8089,9 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
8031 netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected," 8089 netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
8032 " Port %d from %s part number %s\n", 8090 " Port %d from %s part number %s\n",
8033 params->port, vendor_name, vendor_pn); 8091 params->port, vendor_name, vendor_pn);
8034 phy->flags |= FLAGS_SFP_NOT_APPROVED; 8092 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
8093 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG)
8094 phy->flags |= FLAGS_SFP_NOT_APPROVED;
8035 return -EINVAL; 8095 return -EINVAL;
8036} 8096}
8037 8097
@@ -9091,6 +9151,12 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9091 tmp2 &= 0xFFEF; 9151 tmp2 &= 0xFFEF;
9092 bnx2x_cl45_write(bp, phy, 9152 bnx2x_cl45_write(bp, phy,
9093 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2); 9153 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
9154 bnx2x_cl45_read(bp, phy,
9155 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
9156 &tmp2);
9157 bnx2x_cl45_write(bp, phy,
9158 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
9159 (tmp2 & 0x7fff));
9094 } 9160 }
9095 9161
9096 return 0; 9162 return 0;
@@ -9271,12 +9337,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
9271 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 9337 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
9272 ((1<<5) | (1<<2))); 9338 ((1<<5) | (1<<2)));
9273 } 9339 }
9274 DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n"); 9340
9275 bnx2x_8727_specific_func(phy, params, ENABLE_TX); 9341 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
9276 /* If transmitter is disabled, ignore false link up indication */ 9342 DP(NETIF_MSG_LINK, "Enabling 8727 TX laser\n");
9277 bnx2x_cl45_read(bp, phy, 9343 bnx2x_sfp_set_transmitter(params, phy, 1);
9278 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1); 9344 } else {
9279 if (val1 & (1<<15)) {
9280 DP(NETIF_MSG_LINK, "Tx is disabled\n"); 9345 DP(NETIF_MSG_LINK, "Tx is disabled\n");
9281 return 0; 9346 return 0;
9282 } 9347 }
@@ -9370,8 +9435,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9370 9435
9371 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 9436 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
9372 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); 9437 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
9373 bnx2x_save_spirom_version(bp, port, 9438 bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
9374 ((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f),
9375 phy->ver_addr); 9439 phy->ver_addr);
9376 } else { 9440 } else {
9377 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ 9441 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
@@ -9794,6 +9858,15 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
9794 other_shmem_base_addr)); 9858 other_shmem_base_addr));
9795 9859
9796 u32 shmem_base_path[2]; 9860 u32 shmem_base_path[2];
9861
9862 /* Work around for 84833 LED failure inside RESET status */
9863 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
9864 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
9865 MDIO_AN_REG_8481_MII_CTRL_FORCE_1G);
9866 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
9867 MDIO_AN_REG_8481_1G_100T_EXT_CTRL,
9868 MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF);
9869
9797 shmem_base_path[0] = params->shmem_base; 9870 shmem_base_path[0] = params->shmem_base;
9798 shmem_base_path[1] = other_shmem_base_addr; 9871 shmem_base_path[1] = other_shmem_base_addr;
9799 9872
@@ -10104,7 +10177,7 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
10104 u8 port; 10177 u8 port;
10105 u16 val16; 10178 u16 val16;
10106 10179
10107 if (!(CHIP_IS_E1(bp))) 10180 if (!(CHIP_IS_E1x(bp)))
10108 port = BP_PATH(bp); 10181 port = BP_PATH(bp);
10109 else 10182 else
10110 port = params->port; 10183 port = params->port;
@@ -10131,7 +10204,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10131 u16 val; 10204 u16 val;
10132 u8 port; 10205 u8 port;
10133 10206
10134 if (!(CHIP_IS_E1(bp))) 10207 if (!(CHIP_IS_E1x(bp)))
10135 port = BP_PATH(bp); 10208 port = BP_PATH(bp);
10136 else 10209 else
10137 port = params->port; 10210 port = params->port;
@@ -12050,6 +12123,9 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12050 12123
12051 bnx2x_emac_init(params, vars); 12124 bnx2x_emac_init(params, vars);
12052 12125
12126 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
12127 vars->link_status |= LINK_STATUS_PFC_ENABLED;
12128
12053 if (params->num_phys == 0) { 12129 if (params->num_phys == 0) {
12054 DP(NETIF_MSG_LINK, "No phy found for initialization !!\n"); 12130 DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
12055 return -EINVAL; 12131 return -EINVAL;
@@ -12129,10 +12205,10 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12129 * Hold it as vars low 12205 * Hold it as vars low
12130 */ 12206 */
12131 /* clear link led */ 12207 /* clear link led */
12208 bnx2x_set_mdio_clk(bp, params->chip_id, port);
12132 bnx2x_set_led(params, vars, LED_MODE_OFF, 0); 12209 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
12133 12210
12134 if (reset_ext_phy) { 12211 if (reset_ext_phy) {
12135 bnx2x_set_mdio_clk(bp, params->chip_id, port);
12136 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 12212 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
12137 phy_index++) { 12213 phy_index++) {
12138 if (params->phy[phy_index].link_reset) { 12214 if (params->phy[phy_index].link_reset) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 7ba557a610da..763535ee4832 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -89,6 +89,8 @@
89#define PFC_BRB_FULL_LB_XON_THRESHOLD 250 89#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
90 90
91#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b)) 91#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
92
93#define BMAC_CONTROL_RX_ENABLE 2
92/***********************************************************/ 94/***********************************************************/
93/* Structs */ 95/* Structs */
94/***********************************************************/ 96/***********************************************************/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f7f9aa807264..e077d2508727 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -52,6 +52,7 @@
52#include <linux/prefetch.h> 52#include <linux/prefetch.h>
53#include <linux/zlib.h> 53#include <linux/zlib.h>
54#include <linux/io.h> 54#include <linux/io.h>
55#include <linux/semaphore.h>
55#include <linux/stringify.h> 56#include <linux/stringify.h>
56#include <linux/vmalloc.h> 57#include <linux/vmalloc.h>
57 58
@@ -211,6 +212,10 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
211 212
212MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); 213MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
213 214
215/* Global resources for unloading a previously loaded device */
216#define BNX2X_PREV_WAIT_NEEDED 1
217static DEFINE_SEMAPHORE(bnx2x_prev_sem);
218static LIST_HEAD(bnx2x_prev_list);
214/**************************************************************************** 219/****************************************************************************
215* General service functions 220* General service functions
216****************************************************************************/ 221****************************************************************************/
@@ -8812,109 +8817,371 @@ static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
8812 bnx2x_undi_int_disable_e1h(bp); 8817 bnx2x_undi_int_disable_e1h(bp);
8813} 8818}
8814 8819
8815static void __devinit bnx2x_undi_unload(struct bnx2x *bp) 8820static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
8816{ 8821{
8817 u32 val; 8822 u32 val, base_addr, offset, mask, reset_reg;
8823 bool mac_stopped = false;
8824 u8 port = BP_PORT(bp);
8818 8825
8819 /* possibly another driver is trying to reset the chip */ 8826 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
8820 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8821 8827
8822 /* check if doorbell queue is reset */ 8828 if (!CHIP_IS_E3(bp)) {
8823 if (REG_RD(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET) 8829 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
8824 & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 8830 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
8831 if ((mask & reset_reg) && val) {
8832 u32 wb_data[2];
8833 BNX2X_DEV_INFO("Disable bmac Rx\n");
8834 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
8835 : NIG_REG_INGRESS_BMAC0_MEM;
8836 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
8837 : BIGMAC_REGISTER_BMAC_CONTROL;
8825 8838
8826 /* 8839 /*
8827 * Check if it is the UNDI driver 8840 * use rd/wr since we cannot use dmae. This is safe
8841 * since MCP won't access the bus due to the request
8842 * to unload, and no function on the path can be
8843 * loaded at this time.
8844 */
8845 wb_data[0] = REG_RD(bp, base_addr + offset);
8846 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
8847 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
8848 REG_WR(bp, base_addr + offset, wb_data[0]);
8849 REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
8850
8851 }
8852 BNX2X_DEV_INFO("Disable emac Rx\n");
8853 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
8854
8855 mac_stopped = true;
8856 } else {
8857 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
8858 BNX2X_DEV_INFO("Disable xmac Rx\n");
8859 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
8860 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
8861 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
8862 val & ~(1 << 1));
8863 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
8864 val | (1 << 1));
8865 REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
8866 mac_stopped = true;
8867 }
8868 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
8869 if (mask & reset_reg) {
8870 BNX2X_DEV_INFO("Disable umac Rx\n");
8871 base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
8872 REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
8873 mac_stopped = true;
8874 }
8875 }
8876
8877 if (mac_stopped)
8878 msleep(20);
8879
8880}
8881
8882#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
8883#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
8884#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
8885#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
8886
8887static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
8888 u8 inc)
8889{
8890 u16 rcq, bd;
8891 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
8892
8893 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
8894 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
8895
8896 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
8897 REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
8898
8899 BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
8900 port, bd, rcq);
8901}
8902
8903static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
8904{
8905 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
8906 if (!rc) {
8907 BNX2X_ERR("MCP response failure, aborting\n");
8908 return -EBUSY;
8909 }
8910
8911 return 0;
8912}
8913
8914static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp)
8915{
8916 struct bnx2x_prev_path_list *tmp_list;
8917 int rc = false;
8918
8919 if (down_trylock(&bnx2x_prev_sem))
8920 return false;
8921
8922 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
8923 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
8924 bp->pdev->bus->number == tmp_list->bus &&
8925 BP_PATH(bp) == tmp_list->path) {
8926 rc = true;
8927 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
8928 BP_PATH(bp));
8929 break;
8930 }
8931 }
8932
8933 up(&bnx2x_prev_sem);
8934
8935 return rc;
8936}
8937
8938static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
8939{
8940 struct bnx2x_prev_path_list *tmp_list;
8941 int rc;
8942
8943 tmp_list = (struct bnx2x_prev_path_list *)
8944 kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
8945 if (!tmp_list) {
8946 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
8947 return -ENOMEM;
8948 }
8949
8950 tmp_list->bus = bp->pdev->bus->number;
8951 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
8952 tmp_list->path = BP_PATH(bp);
8953
8954 rc = down_interruptible(&bnx2x_prev_sem);
8955 if (rc) {
8956 BNX2X_ERR("Received %d when tried to take lock\n", rc);
8957 kfree(tmp_list);
8958 } else {
8959 BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
8960 BP_PATH(bp));
8961 list_add(&tmp_list->list, &bnx2x_prev_list);
8962 up(&bnx2x_prev_sem);
8963 }
8964
8965 return rc;
8966}
8967
8968static bool __devinit bnx2x_can_flr(struct bnx2x *bp)
8969{
8970 int pos;
8971 u32 cap;
8972 struct pci_dev *dev = bp->pdev;
8973
8974 pos = pci_pcie_cap(dev);
8975 if (!pos)
8976 return false;
8977
8978 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
8979 if (!(cap & PCI_EXP_DEVCAP_FLR))
8980 return false;
8981
8982 return true;
8983}
8984
8985static int __devinit bnx2x_do_flr(struct bnx2x *bp)
8986{
8987 int i, pos;
8988 u16 status;
8989 struct pci_dev *dev = bp->pdev;
8990
8991 /* probe the capability first */
8992 if (bnx2x_can_flr(bp))
8993 return -ENOTTY;
8994
8995 pos = pci_pcie_cap(dev);
8996 if (!pos)
8997 return -ENOTTY;
8998
8999 /* Wait for Transaction Pending bit clean */
9000 for (i = 0; i < 4; i++) {
9001 if (i)
9002 msleep((1 << (i - 1)) * 100);
9003
9004 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
9005 if (!(status & PCI_EXP_DEVSTA_TRPND))
9006 goto clear;
9007 }
9008
9009 dev_err(&dev->dev,
9010 "transaction is not cleared; proceeding with reset anyway\n");
9011
9012clear:
9013 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
9014 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
9015 bp->common.bc_ver);
9016 return -EINVAL;
9017 }
9018
9019 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
9020
9021 return 0;
9022}
9023
9024static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
9025{
9026 int rc;
9027
9028 BNX2X_DEV_INFO("Uncommon unload Flow\n");
9029
9030 /* Test if previous unload process was already finished for this path */
9031 if (bnx2x_prev_is_path_marked(bp))
9032 return bnx2x_prev_mcp_done(bp);
9033
9034 /* If function has FLR capabilities, and existing FW version matches
9035 * the one required, then FLR will be sufficient to clean any residue
9036 * left by previous driver
9037 */
9038 if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp))
9039 return bnx2x_do_flr(bp);
9040
9041 /* Close the MCP request, return failure*/
9042 rc = bnx2x_prev_mcp_done(bp);
9043 if (!rc)
9044 rc = BNX2X_PREV_WAIT_NEEDED;
9045
9046 return rc;
9047}
9048
9049static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
9050{
9051 u32 reset_reg, tmp_reg = 0, rc;
9052 /* It is possible a previous function received 'common' answer,
9053 * but hasn't loaded yet, therefore creating a scenario of
9054 * multiple functions receiving 'common' on the same path.
9055 */
9056 BNX2X_DEV_INFO("Common unload Flow\n");
9057
9058 if (bnx2x_prev_is_path_marked(bp))
9059 return bnx2x_prev_mcp_done(bp);
9060
9061 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
9062
9063 /* Reset should be performed after BRB is emptied */
9064 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
9065 u32 timer_count = 1000;
9066 bool prev_undi = false;
9067
9068 /* Close the MAC Rx to prevent BRB from filling up */
9069 bnx2x_prev_unload_close_mac(bp);
9070
9071 /* Check if the UNDI driver was previously loaded
8828 * UNDI driver initializes CID offset for normal bell to 0x7 9072 * UNDI driver initializes CID offset for normal bell to 0x7
8829 */ 9073 */
8830 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 9074 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
8831 if (val == 0x7) { 9075 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
8832 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 9076 tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8833 /* save our pf_num */ 9077 if (tmp_reg == 0x7) {
8834 int orig_pf_num = bp->pf_num; 9078 BNX2X_DEV_INFO("UNDI previously loaded\n");
8835 int port; 9079 prev_undi = true;
8836 u32 swap_en, swap_val, value; 9080 /* clear the UNDI indication */
8837 9081 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8838 /* clear the UNDI indication */
8839 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8840
8841 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8842
8843 /* try unload UNDI on port 0 */
8844 bp->pf_num = 0;
8845 bp->fw_seq =
8846 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
8847 DRV_MSG_SEQ_NUMBER_MASK);
8848 reset_code = bnx2x_fw_command(bp, reset_code, 0);
8849
8850 /* if UNDI is loaded on the other port */
8851 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8852
8853 /* send "DONE" for previous unload */
8854 bnx2x_fw_command(bp,
8855 DRV_MSG_CODE_UNLOAD_DONE, 0);
8856
8857 /* unload UNDI on port 1 */
8858 bp->pf_num = 1;
8859 bp->fw_seq =
8860 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
8861 DRV_MSG_SEQ_NUMBER_MASK);
8862 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8863
8864 bnx2x_fw_command(bp, reset_code, 0);
8865 } 9082 }
9083 }
9084 /* wait until BRB is empty */
9085 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
9086 while (timer_count) {
9087 u32 prev_brb = tmp_reg;
8866 9088
8867 bnx2x_undi_int_disable(bp); 9089 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
8868 port = BP_PORT(bp); 9090 if (!tmp_reg)
8869 9091 break;
8870 /* close input traffic and wait for it */
8871 /* Do not rcv packets to BRB */
8872 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
8873 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8874 /* Do not direct rcv packets that are not for MCP to
8875 * the BRB */
8876 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8877 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8878 /* clear AEU */
8879 REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8880 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8881 msleep(10);
8882
8883 /* save NIG port swap info */
8884 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8885 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8886 /* reset device */
8887 REG_WR(bp,
8888 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8889 0xd3ffffff);
8890
8891 value = 0x1400;
8892 if (CHIP_IS_E3(bp)) {
8893 value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
8894 value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
8895 }
8896 9092
8897 REG_WR(bp, 9093 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
8898 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8899 value);
8900 9094
8901 /* take the NIG out of reset and restore swap values */ 9095 /* reset timer as long as BRB actually gets emptied */
8902 REG_WR(bp, 9096 if (prev_brb > tmp_reg)
8903 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 9097 timer_count = 1000;
8904 MISC_REGISTERS_RESET_REG_1_RST_NIG); 9098 else
8905 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val); 9099 timer_count--;
8906 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8907 9100
8908 /* send unload done to the MCP */ 9101 /* If UNDI resides in memory, manually increment it */
8909 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 9102 if (prev_undi)
9103 bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
8910 9104
8911 /* restore our func and fw_seq */ 9105 udelay(10);
8912 bp->pf_num = orig_pf_num;
8913 } 9106 }
9107
9108 if (!timer_count)
9109 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
9110
8914 } 9111 }
8915 9112
8916 /* now it's safe to release the lock */ 9113 /* No packets are in the pipeline, path is ready for reset */
8917 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 9114 bnx2x_reset_common(bp);
9115
9116 rc = bnx2x_prev_mark_path(bp);
9117 if (rc) {
9118 bnx2x_prev_mcp_done(bp);
9119 return rc;
9120 }
9121
9122 return bnx2x_prev_mcp_done(bp);
9123}
9124
9125static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
9126{
9127 int time_counter = 10;
9128 u32 rc, fw, hw_lock_reg, hw_lock_val;
9129 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
9130
9131 /* Release previously held locks */
9132 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
9133 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
9134 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
9135
9136 hw_lock_val = (REG_RD(bp, hw_lock_reg));
9137 if (hw_lock_val) {
9138 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
9139 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
9140 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9141 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
9142 }
9143
9144 BNX2X_DEV_INFO("Release Previously held hw lock\n");
9145 REG_WR(bp, hw_lock_reg, 0xffffffff);
9146 } else
9147 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
9148
9149 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
9150 BNX2X_DEV_INFO("Release previously held alr\n");
9151 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
9152 }
9153
9154
9155 do {
9156 /* Lock MCP using an unload request */
9157 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
9158 if (!fw) {
9159 BNX2X_ERR("MCP response failure, aborting\n");
9160 rc = -EBUSY;
9161 break;
9162 }
9163
9164 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9165 rc = bnx2x_prev_unload_common(bp);
9166 break;
9167 }
9168
 9169 		/* non-common reply from MCP might require looping */
9170 rc = bnx2x_prev_unload_uncommon(bp);
9171 if (rc != BNX2X_PREV_WAIT_NEEDED)
9172 break;
9173
9174 msleep(20);
9175 } while (--time_counter);
9176
9177 if (!time_counter || rc) {
9178 BNX2X_ERR("Failed unloading previous driver, aborting\n");
9179 rc = -EBUSY;
9180 }
9181
9182 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
9183
9184 return rc;
8918} 9185}
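
bnx2x_prev_unload() above wraps the MCP unload request in a bounded retry loop: up to ten attempts, 20 ms apart, stopping early on either success or a hard failure. A plain-C sketch of the same shape, with a hypothetical try_handshake() in place of bnx2x_fw_command():

#include <stdio.h>
#include <unistd.h>

#define RETRY_WAIT_NEEDED 1	/* transient: caller should try again */
#define RETRY_OK          0

/* Hypothetical stand-in for the firmware unload request; in this
 * sketch it only succeeds on the third attempt. */
static int try_handshake(void)
{
	static int calls;

	return (++calls < 3) ? RETRY_WAIT_NEEDED : RETRY_OK;
}

int main(void)
{
	int time_counter = 10;
	int rc;

	do {
		rc = try_handshake();
		if (rc != RETRY_WAIT_NEEDED)
			break;
		usleep(20 * 1000);	/* mirrors the driver's msleep(20) */
	} while (--time_counter);

	if (!time_counter || rc != RETRY_OK) {
		fprintf(stderr, "handshake never completed\n");
		return 1;
	}
	printf("handshake done with %d retries to spare\n", time_counter);
	return 0;
}
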
8919 9186
8920static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) 9187static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
@@ -10100,8 +10367,16 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10100 func = BP_FUNC(bp); 10367 func = BP_FUNC(bp);
10101 10368
10102 /* need to reset chip if undi was active */ 10369 /* need to reset chip if undi was active */
10103 if (!BP_NOMCP(bp)) 10370 if (!BP_NOMCP(bp)) {
10104 bnx2x_undi_unload(bp); 10371 /* init fw_seq */
10372 bp->fw_seq =
10373 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
10374 DRV_MSG_SEQ_NUMBER_MASK;
10375 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10376
10377 bnx2x_prev_unload(bp);
10378 }
10379
10105 10380
10106 if (CHIP_REV_IS_FPGA(bp)) 10381 if (CHIP_REV_IS_FPGA(bp))
10107 dev_err(&bp->pdev->dev, "FPGA detected\n"); 10382 dev_err(&bp->pdev->dev, "FPGA detected\n");
@@ -11431,9 +11706,18 @@ static int __init bnx2x_init(void)
11431 11706
11432static void __exit bnx2x_cleanup(void) 11707static void __exit bnx2x_cleanup(void)
11433{ 11708{
11709 struct list_head *pos, *q;
11434 pci_unregister_driver(&bnx2x_pci_driver); 11710 pci_unregister_driver(&bnx2x_pci_driver);
11435 11711
11436 destroy_workqueue(bnx2x_wq); 11712 destroy_workqueue(bnx2x_wq);
11713
 11714 	/* Free globally allocated resources */
11715 list_for_each_safe(pos, q, &bnx2x_prev_list) {
11716 struct bnx2x_prev_path_list *tmp =
11717 list_entry(pos, struct bnx2x_prev_path_list, list);
11718 list_del(pos);
11719 kfree(tmp);
11720 }
11437} 11721}
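
The new module-exit code walks bnx2x_prev_list with list_for_each_safe(), which caches the next pointer so each entry can be unlinked and freed mid-walk. <linux/list.h> is kernel-only, so the sketch below shows the same delete-while-iterating idea on a hand-rolled singly linked list rather than the kernel API itself.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int path_id;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *pos, *next;
	int i;

	/* build a small list */
	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->path_id = i;
		n->next = head;
		head = n;
	}

	/* "safe" traversal: grab the next pointer before freeing pos,
	 * which is exactly why list_for_each_safe() exists in the kernel */
	for (pos = head; pos; pos = next) {
		next = pos->next;
		printf("freeing path %d\n", pos->path_id);
		free(pos);
	}
	head = NULL;
	return 0;
}
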
11438 11722
11439void bnx2x_notify_link_changed(struct bnx2x *bp) 11723void bnx2x_notify_link_changed(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index fd7fb4581849..c25803b9c0ca 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -987,6 +987,7 @@
987 * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */ 987 * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */
988#define IGU_REG_WRITE_DONE_PENDING 0x130480 988#define IGU_REG_WRITE_DONE_PENDING 0x130480
989#define MCP_A_REG_MCPR_SCRATCH 0x3a0000 989#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
990#define MCP_REG_MCPR_ACCESS_LOCK 0x8009c
990#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c 991#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c
991#define MCP_REG_MCPR_GP_INPUTS 0x800c0 992#define MCP_REG_MCPR_GP_INPUTS 0x800c0
992#define MCP_REG_MCPR_GP_OENABLE 0x800c8 993#define MCP_REG_MCPR_GP_OENABLE 0x800c8
@@ -1686,6 +1687,7 @@
1686 [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13] 1687 [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
1687 Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; 16] 1688 Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; 16]
1688 rst_pxp_rq_rd_wr; 31:17] reserved */ 1689 rst_pxp_rq_rd_wr; 31:17] reserved */
1690#define MISC_REG_RESET_REG_1 0xa580
1689#define MISC_REG_RESET_REG_2 0xa590 1691#define MISC_REG_RESET_REG_2 0xa590
1690/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is 1692/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
1691 shared with the driver resides */ 1693 shared with the driver resides */
@@ -5352,6 +5354,7 @@
5352#define XMAC_CTRL_REG_TX_EN (0x1<<0) 5354#define XMAC_CTRL_REG_TX_EN (0x1<<0)
5353#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18) 5355#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18)
5354#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17) 5356#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17)
5357#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON (0x1<<1)
5355#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0) 5358#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0)
5356#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3) 5359#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3)
5357#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4) 5360#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4)
@@ -5606,6 +5609,7 @@
5606/* [RC 32] Parity register #0 read clear */ 5609/* [RC 32] Parity register #0 read clear */
5607#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128 5610#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
5608#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138 5611#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
5612#define MCPR_ACCESS_LOCK_LOCK (1L<<31)
5609#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) 5613#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
5610#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) 5614#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
5611#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) 5615#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
@@ -5732,6 +5736,7 @@
5732#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 5736#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
5733#define MISC_REGISTERS_GPIO_SET_POS 8 5737#define MISC_REGISTERS_GPIO_SET_POS 8
5734#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 5738#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
5739#define MISC_REGISTERS_RESET_REG_1_RST_BRB1 (0x1<<0)
5735#define MISC_REGISTERS_RESET_REG_1_RST_DORQ (0x1<<19) 5740#define MISC_REGISTERS_RESET_REG_1_RST_DORQ (0x1<<19)
5736#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29) 5741#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
5737#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) 5742#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
@@ -6816,10 +6821,13 @@ The other bits are reserved and should be zero*/
6816 6821
6817#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020 6822#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
6818#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 6823#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
6824#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
6819#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1 6825#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
6820#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 6826#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
6821#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6 6827#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
6822#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 6828#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
6829#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL 0xfff0
6830#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008
6823#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5 6831#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
6824#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7 6832#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
6825#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8 6833#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
@@ -6939,6 +6947,10 @@ The other bits are reserved and should be zero*/
6939#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2 6947#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
6940#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3 6948#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
6941#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4 6949#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
6950#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000
6951#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100
6952#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010
6953#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1
6942#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE 6954#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
6943#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0 6955#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
6944#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2 6956#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 3f52fadee3ed..513573321625 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -3847,7 +3847,7 @@ static bool bnx2x_credit_pool_get_entry(
3847 continue; 3847 continue;
3848 3848
3849 /* If we've got here we are going to find a free entry */ 3849 /* If we've got here we are going to find a free entry */
3850 for (idx = vec * BNX2X_POOL_VEC_SIZE, i = 0; 3850 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3851 i < BIT_VEC64_ELEM_SZ; idx++, i++) 3851 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3852 3852
3853 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { 3853 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 4e4bb3875868..062ac333fde6 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2778,7 +2778,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 2779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2780 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && 2780 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2781 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) 2781 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2782 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2783 !tp->pci_fn))
2782 return; 2784 return;
2783 2785
2784 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || 2786 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 05ff076af06d..b126b98065a9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2000,13 +2000,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
2000/* 2000/*
2001 * debugfs support 2001 * debugfs support
2002 */ 2002 */
2003
2004static int mem_open(struct inode *inode, struct file *file)
2005{
2006 file->private_data = inode->i_private;
2007 return 0;
2008}
2009
2010static ssize_t mem_read(struct file *file, char __user *buf, size_t count, 2003static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2011 loff_t *ppos) 2004 loff_t *ppos)
2012{ 2005{
@@ -2050,7 +2043,7 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2050 2043
2051static const struct file_operations mem_debugfs_fops = { 2044static const struct file_operations mem_debugfs_fops = {
2052 .owner = THIS_MODULE, 2045 .owner = THIS_MODULE,
2053 .open = mem_open, 2046 .open = simple_open,
2054 .read = mem_read, 2047 .read = mem_read,
2055 .llseek = default_llseek, 2048 .llseek = default_llseek,
2056}; 2049};
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 9eb815941df5..f7f0bf5d037b 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -356,13 +356,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
356 356
357 if (prop) 357 if (prop)
358 tbiaddr = *prop; 358 tbiaddr = *prop;
359 }
360 359
361 if (tbiaddr == -1) { 360 if (tbiaddr == -1) {
362 err = -EBUSY; 361 err = -EBUSY;
363 goto err_free_irqs; 362 goto err_free_irqs;
364 } else { 363 } else {
365 out_be32(tbipa, tbiaddr); 364 out_be32(tbipa, tbiaddr);
365 }
366 } 366 }
367 367
368 err = of_mdiobus_register(new_bus, np); 368 err = of_mdiobus_register(new_bus, np);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 4e3cd2f8debb..17a46e76123f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3945,6 +3945,8 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3945 } 3945 }
3946 3946
3947 if (max_speed == SPEED_1000) { 3947 if (max_speed == SPEED_1000) {
3948 unsigned int snums = qe_get_num_of_snums();
3949
3948 /* configure muram FIFOs for gigabit operation */ 3950 /* configure muram FIFOs for gigabit operation */
3949 ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; 3951 ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
3950 ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; 3952 ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
@@ -3954,11 +3956,11 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3954 ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT; 3956 ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
3955 ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4; 3957 ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
3956 3958
3957 /* If QE's snum number is 46 which means we need to support 3959 /* If QE's snum number is 46/76 which means we need to support
3958 * 4 UECs at 1000Base-T simultaneously, we need to allocate 3960 * 4 UECs at 1000Base-T simultaneously, we need to allocate
3959 * more Threads to Rx. 3961 * more Threads to Rx.
3960 */ 3962 */
3961 if (qe_get_num_of_snums() == 46) 3963 if ((snums == 76) || (snums == 46))
3962 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6; 3964 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
3963 else 3965 else
3964 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4; 3966 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 0e9aec8f6917..4348b6fd44fa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -164,6 +164,8 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
164static bool e1000_vlan_used(struct e1000_adapter *adapter); 164static bool e1000_vlan_used(struct e1000_adapter *adapter);
165static void e1000_vlan_mode(struct net_device *netdev, 165static void e1000_vlan_mode(struct net_device *netdev,
166 netdev_features_t features); 166 netdev_features_t features);
167static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
168 bool filter_on);
167static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); 169static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
168static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); 170static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
169static void e1000_restore_vlan(struct e1000_adapter *adapter); 171static void e1000_restore_vlan(struct e1000_adapter *adapter);
@@ -215,7 +217,8 @@ MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
215MODULE_LICENSE("GPL"); 217MODULE_LICENSE("GPL");
216MODULE_VERSION(DRV_VERSION); 218MODULE_VERSION(DRV_VERSION);
217 219
218static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; 220#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
221static int debug = -1;
219module_param(debug, int, 0); 222module_param(debug, int, 0);
220MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 223MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
221 224
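
This series moves several Intel drivers (e1000, e1000e, igb, igbvf, ixgb, ixgbe, ixgbevf) to a debug default of -1 interpreted by netif_msg_init(), with DEFAULT_MSG_ENABLE covering DRV, PROBE and LINK messages. The standalone sketch below mirrors what that helper is expected to return -- the default mask for out-of-range values, zero for zero, otherwise the low N bits -- so the effect of the new default is easy to see; the helper body here is a paraphrase, not a quotation of linux/netdevice.h.

#include <stdio.h>
#include <stdint.h>

#define DEFAULT_MSG_ENABLE 0x0007	/* DRV | PROBE | LINK */

/* Mirrors the documented behaviour of netif_msg_init(debug, defaults):
 * defaults for out-of-range values, otherwise the low N bits. */
static uint32_t msg_init(int debug_value, uint32_t default_bits)
{
	if (debug_value < 0 || debug_value >= (int)(sizeof(uint32_t) * 8))
		return default_bits;
	if (debug_value == 0)
		return 0;
	return (1u << debug_value) - 1;
}

int main(void)
{
	printf("debug=-1 -> 0x%04x\n", msg_init(-1, DEFAULT_MSG_ENABLE));
	printf("debug=3  -> 0x%04x\n", msg_init(3, DEFAULT_MSG_ENABLE));
	printf("debug=16 -> 0x%04x\n", msg_init(16, DEFAULT_MSG_ENABLE));
	return 0;
}
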
@@ -979,7 +982,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
979 adapter = netdev_priv(netdev); 982 adapter = netdev_priv(netdev);
980 adapter->netdev = netdev; 983 adapter->netdev = netdev;
981 adapter->pdev = pdev; 984 adapter->pdev = pdev;
982 adapter->msg_enable = (1 << debug) - 1; 985 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
983 adapter->bars = bars; 986 adapter->bars = bars;
984 adapter->need_ioport = need_ioport; 987 adapter->need_ioport = need_ioport;
985 988
@@ -1214,7 +1217,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1214 if (err) 1217 if (err)
1215 goto err_register; 1218 goto err_register;
1216 1219
1217 e1000_vlan_mode(netdev, netdev->features); 1220 e1000_vlan_filter_on_off(adapter, false);
1218 1221
1219 /* print bus type/speed/width info */ 1222 /* print bus type/speed/width info */
1220 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", 1223 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
@@ -4770,6 +4773,22 @@ static bool e1000_vlan_used(struct e1000_adapter *adapter)
4770 return false; 4773 return false;
4771} 4774}
4772 4775
4776static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4777 netdev_features_t features)
4778{
4779 struct e1000_hw *hw = &adapter->hw;
4780 u32 ctrl;
4781
4782 ctrl = er32(CTRL);
4783 if (features & NETIF_F_HW_VLAN_RX) {
4784 /* enable VLAN tag insert/strip */
4785 ctrl |= E1000_CTRL_VME;
4786 } else {
4787 /* disable VLAN tag insert/strip */
4788 ctrl &= ~E1000_CTRL_VME;
4789 }
4790 ew32(CTRL, ctrl);
4791}
4773static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 4792static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4774 bool filter_on) 4793 bool filter_on)
4775{ 4794{
@@ -4779,6 +4798,7 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4779 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4798 if (!test_bit(__E1000_DOWN, &adapter->flags))
4780 e1000_irq_disable(adapter); 4799 e1000_irq_disable(adapter);
4781 4800
4801 __e1000_vlan_mode(adapter, adapter->netdev->features);
4782 if (filter_on) { 4802 if (filter_on) {
4783 /* enable VLAN receive filtering */ 4803 /* enable VLAN receive filtering */
4784 rctl = er32(RCTL); 4804 rctl = er32(RCTL);
@@ -4799,24 +4819,14 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4799} 4819}
4800 4820
4801static void e1000_vlan_mode(struct net_device *netdev, 4821static void e1000_vlan_mode(struct net_device *netdev,
4802 netdev_features_t features) 4822 netdev_features_t features)
4803{ 4823{
4804 struct e1000_adapter *adapter = netdev_priv(netdev); 4824 struct e1000_adapter *adapter = netdev_priv(netdev);
4805 struct e1000_hw *hw = &adapter->hw;
4806 u32 ctrl;
4807 4825
4808 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4826 if (!test_bit(__E1000_DOWN, &adapter->flags))
4809 e1000_irq_disable(adapter); 4827 e1000_irq_disable(adapter);
4810 4828
4811 ctrl = er32(CTRL); 4829 __e1000_vlan_mode(adapter, features);
4812 if (features & NETIF_F_HW_VLAN_RX) {
4813 /* enable VLAN tag insert/strip */
4814 ctrl |= E1000_CTRL_VME;
4815 } else {
4816 /* disable VLAN tag insert/strip */
4817 ctrl &= ~E1000_CTRL_VME;
4818 }
4819 ew32(CTRL, ctrl);
4820 4830
4821 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4831 if (!test_bit(__E1000_DOWN, &adapter->flags))
4822 e1000_irq_enable(adapter); 4832 e1000_irq_enable(adapter);
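
The e1000 change above folds the CTRL.VME programming into one __e1000_vlan_mode() helper so the filter path and the features path cannot drift apart. A small sketch of the same extraction with the register modeled as a plain variable; the bit position and names are chosen only for the example.

#include <stdio.h>
#include <stdint.h>

#define CTRL_VME	(1u << 30)	/* VLAN mode enable; bit is illustrative */
#define FEAT_HW_VLAN_RX	0x1

static uint32_t ctrl_reg;	/* stands in for er32(CTRL)/ew32(CTRL) */

/* One place that knows how the feature flag maps onto the register */
static void apply_vlan_mode(uint32_t features)
{
	if (features & FEAT_HW_VLAN_RX)
		ctrl_reg |= CTRL_VME;	/* enable VLAN tag insert/strip */
	else
		ctrl_reg &= ~CTRL_VME;	/* disable VLAN tag insert/strip */
}

int main(void)
{
	apply_vlan_mode(FEAT_HW_VLAN_RX);
	printf("ctrl after enable:  0x%08x\n", ctrl_reg);
	apply_vlan_mode(0);
	printf("ctrl after disable: 0x%08x\n", ctrl_reg);
	return 0;
}
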
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 86cdd4793992..b83897f76ee3 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -161,6 +161,12 @@ struct e1000_info;
161/* Time to wait before putting the device into D3 if there's no link (in ms). */ 161/* Time to wait before putting the device into D3 if there's no link (in ms). */
162#define LINK_TIMEOUT 100 162#define LINK_TIMEOUT 100
163 163
164/*
165 * Count for polling __E1000_RESET condition every 10-20msec.
166 * Experimentation has shown the reset can take approximately 210msec.
167 */
168#define E1000_CHECK_RESET_COUNT 25
169
164#define DEFAULT_RDTR 0 170#define DEFAULT_RDTR 0
165#define DEFAULT_RADV 8 171#define DEFAULT_RADV 8
166#define BURST_RDTR 0x20 172#define BURST_RDTR 0x20
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 64c76443a7aa..b461c24945e3 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1310,10 +1310,6 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1310 1310
1311 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) 1311 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1312 oem_reg |= HV_OEM_BITS_LPLU; 1312 oem_reg |= HV_OEM_BITS_LPLU;
1313
1314 /* Set Restart auto-neg to activate the bits */
1315 if (!hw->phy.ops.check_reset_block(hw))
1316 oem_reg |= HV_OEM_BITS_RESTART_AN;
1317 } else { 1313 } else {
1318 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | 1314 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1319 E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) 1315 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
@@ -1324,6 +1320,11 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1324 oem_reg |= HV_OEM_BITS_LPLU; 1320 oem_reg |= HV_OEM_BITS_LPLU;
1325 } 1321 }
1326 1322
1323 /* Set Restart auto-neg to activate the bits */
1324 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1325 !hw->phy.ops.check_reset_block(hw))
1326 oem_reg |= HV_OEM_BITS_RESTART_AN;
1327
1327 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); 1328 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1328 1329
1329release: 1330release:
@@ -3682,7 +3683,11 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3682 3683
3683 if (hw->mac.type >= e1000_pchlan) { 3684 if (hw->mac.type >= e1000_pchlan) {
3684 e1000_oem_bits_config_ich8lan(hw, false); 3685 e1000_oem_bits_config_ich8lan(hw, false);
3685 e1000_phy_hw_reset_ich8lan(hw); 3686
3687 /* Reset PHY to activate OEM bits on 82577/8 */
3688 if (hw->mac.type == e1000_pchlan)
3689 e1000e_phy_hw_reset_generic(hw);
3690
3686 ret_val = hw->phy.ops.acquire(hw); 3691 ret_val = hw->phy.ops.acquire(hw);
3687 if (ret_val) 3692 if (ret_val)
3688 return; 3693 return;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7152eb11b7b9..19ab2154802c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -60,6 +60,11 @@
60char e1000e_driver_name[] = "e1000e"; 60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION; 61const char e1000e_driver_version[] = DRV_VERSION;
62 62
63#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
64static int debug = -1;
65module_param(debug, int, 0);
66MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
67
63static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); 68static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
64 69
65static const struct e1000_info *e1000_info_tbl[] = { 70static const struct e1000_info *e1000_info_tbl[] = {
@@ -1054,6 +1059,13 @@ static void e1000_print_hw_hang(struct work_struct *work)
1054 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 1059 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1055 /* execute the writes immediately */ 1060 /* execute the writes immediately */
1056 e1e_flush(); 1061 e1e_flush();
1062 /*
1063 * Due to rare timing issues, write to TIDV again to ensure
1064 * the write is successful
1065 */
1066 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1067 /* execute the writes immediately */
1068 e1e_flush();
1057 adapter->tx_hang_recheck = true; 1069 adapter->tx_hang_recheck = true;
1058 return; 1070 return;
1059 } 1071 }
@@ -3611,6 +3623,16 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3611 3623
3612 /* execute the writes immediately */ 3624 /* execute the writes immediately */
3613 e1e_flush(); 3625 e1e_flush();
3626
3627 /*
3628 * due to rare timing issues, write to TIDV/RDTR again to ensure the
3629 * write is successful
3630 */
3631 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3632 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3633
3634 /* execute the writes immediately */
3635 e1e_flush();
3614} 3636}
3615 3637
3616static void e1000e_update_stats(struct e1000_adapter *adapter); 3638static void e1000e_update_stats(struct e1000_adapter *adapter);
@@ -3963,6 +3985,10 @@ static int e1000_close(struct net_device *netdev)
3963{ 3985{
3964 struct e1000_adapter *adapter = netdev_priv(netdev); 3986 struct e1000_adapter *adapter = netdev_priv(netdev);
3965 struct pci_dev *pdev = adapter->pdev; 3987 struct pci_dev *pdev = adapter->pdev;
3988 int count = E1000_CHECK_RESET_COUNT;
3989
3990 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
3991 usleep_range(10000, 20000);
3966 3992
3967 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 3993 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3968 3994
@@ -5467,6 +5493,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5467 netif_device_detach(netdev); 5493 netif_device_detach(netdev);
5468 5494
5469 if (netif_running(netdev)) { 5495 if (netif_running(netdev)) {
5496 int count = E1000_CHECK_RESET_COUNT;
5497
5498 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
5499 usleep_range(10000, 20000);
5500
5470 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 5501 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5471 e1000e_down(adapter); 5502 e1000e_down(adapter);
5472 e1000_free_irq(adapter); 5503 e1000_free_irq(adapter);
@@ -6172,7 +6203,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6172 adapter->hw.adapter = adapter; 6203 adapter->hw.adapter = adapter;
6173 adapter->hw.mac.type = ei->mac; 6204 adapter->hw.mac.type = ei->mac;
6174 adapter->max_hw_frame_size = ei->max_hw_frame_size; 6205 adapter->max_hw_frame_size = ei->max_hw_frame_size;
6175 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 6206 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6176 6207
6177 mmio_start = pci_resource_start(pdev, 0); 6208 mmio_start = pci_resource_start(pdev, 0);
6178 mmio_len = pci_resource_len(pdev, 0); 6209 mmio_len = pci_resource_len(pdev, 0);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c4902411d749..5ec31598ee47 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -238,6 +238,11 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
238MODULE_LICENSE("GPL"); 238MODULE_LICENSE("GPL");
239MODULE_VERSION(DRV_VERSION); 239MODULE_VERSION(DRV_VERSION);
240 240
241#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
242static int debug = -1;
243module_param(debug, int, 0);
244MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
245
241struct igb_reg_info { 246struct igb_reg_info {
242 u32 ofs; 247 u32 ofs;
243 char *name; 248 char *name;
@@ -1893,7 +1898,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1893 adapter->pdev = pdev; 1898 adapter->pdev = pdev;
1894 hw = &adapter->hw; 1899 hw = &adapter->hw;
1895 hw->back = adapter; 1900 hw->back = adapter;
1896 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE; 1901 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
1897 1902
1898 mmio_start = pci_resource_start(pdev, 0); 1903 mmio_start = pci_resource_start(pdev, 0);
1899 mmio_len = pci_resource_len(pdev, 0); 1904 mmio_len = pci_resource_len(pdev, 0);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 217c143686d2..d61ca2a732f0 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -55,6 +55,11 @@ static const char igbvf_driver_string[] =
55static const char igbvf_copyright[] = 55static const char igbvf_copyright[] =
56 "Copyright (c) 2009 - 2012 Intel Corporation."; 56 "Copyright (c) 2009 - 2012 Intel Corporation.";
57 57
58#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
59static int debug = -1;
60module_param(debug, int, 0);
61MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
62
58static int igbvf_poll(struct napi_struct *napi, int budget); 63static int igbvf_poll(struct napi_struct *napi, int budget);
59static void igbvf_reset(struct igbvf_adapter *); 64static void igbvf_reset(struct igbvf_adapter *);
60static void igbvf_set_interrupt_capability(struct igbvf_adapter *); 65static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
@@ -2649,7 +2654,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2649 adapter->flags = ei->flags; 2654 adapter->flags = ei->flags;
2650 adapter->hw.back = adapter; 2655 adapter->hw.back = adapter;
2651 adapter->hw.mac.type = ei->mac; 2656 adapter->hw.mac.type = ei->mac;
2652 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 2657 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2653 2658
2654 /* PCI config space info */ 2659 /* PCI config space info */
2655 2660
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 82aaa792cbf3..5fce363d810a 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -134,8 +134,8 @@ MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
134MODULE_LICENSE("GPL"); 134MODULE_LICENSE("GPL");
135MODULE_VERSION(DRV_VERSION); 135MODULE_VERSION(DRV_VERSION);
136 136
137#define DEFAULT_DEBUG_LEVEL_SHIFT 3 137#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
138static int debug = DEFAULT_DEBUG_LEVEL_SHIFT; 138static int debug = -1;
139module_param(debug, int, 0); 139module_param(debug, int, 0);
140MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 140MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
141 141
@@ -442,7 +442,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
442 adapter->netdev = netdev; 442 adapter->netdev = netdev;
443 adapter->pdev = pdev; 443 adapter->pdev = pdev;
444 adapter->hw.back = adapter; 444 adapter->hw.back = adapter;
445 adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT); 445 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
446 446
447 adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0); 447 adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
448 if (!adapter->hw.hw_addr) { 448 if (!adapter->hw.hw_addr) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 80e26ff30ebf..74e192107f9a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -544,7 +544,7 @@ struct ixgbe_fdir_filter {
544 u16 action; 544 u16 action;
545}; 545};
546 546
547enum ixbge_state_t { 547enum ixgbe_state_t {
548 __IXGBE_TESTING, 548 __IXGBE_TESTING,
549 __IXGBE_RESETTING, 549 __IXGBE_RESETTING,
550 __IXGBE_DOWN, 550 __IXGBE_DOWN,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index dde65f951400..652e4b09546d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -44,62 +44,94 @@
44#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ 44#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */
45#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ 45#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */
46 46
47int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, 47int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg,
48 struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) 48 struct ixgbe_dcb_config *dcfg, int tc_max)
49{ 49{
50 struct tc_configuration *src_tc_cfg = NULL; 50 struct tc_configuration *src = NULL;
51 struct tc_configuration *dst_tc_cfg = NULL; 51 struct tc_configuration *dst = NULL;
52 int i; 52 int i, j;
53 int tx = DCB_TX_CONFIG;
54 int rx = DCB_RX_CONFIG;
55 int changes = 0;
53 56
54 if (!src_dcb_cfg || !dst_dcb_cfg) 57 if (!scfg || !dcfg)
55 return -EINVAL; 58 return changes;
56 59
57 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { 60 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
58 src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; 61 src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
59 dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; 62 dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0];
60 63
61 dst_tc_cfg->path[DCB_TX_CONFIG].prio_type = 64 if (dst->path[tx].prio_type != src->path[tx].prio_type) {
62 src_tc_cfg->path[DCB_TX_CONFIG].prio_type; 65 dst->path[tx].prio_type = src->path[tx].prio_type;
66 changes |= BIT_PG_TX;
67 }
63 68
64 dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id = 69 if (dst->path[tx].bwg_id != src->path[tx].bwg_id) {
65 src_tc_cfg->path[DCB_TX_CONFIG].bwg_id; 70 dst->path[tx].bwg_id = src->path[tx].bwg_id;
71 changes |= BIT_PG_TX;
72 }
66 73
67 dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent = 74 if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) {
68 src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent; 75 dst->path[tx].bwg_percent = src->path[tx].bwg_percent;
76 changes |= BIT_PG_TX;
77 }
69 78
70 dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap = 79 if (dst->path[tx].up_to_tc_bitmap !=
71 src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap; 80 src->path[tx].up_to_tc_bitmap) {
81 dst->path[tx].up_to_tc_bitmap =
82 src->path[tx].up_to_tc_bitmap;
83 changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG);
84 }
72 85
73 dst_tc_cfg->path[DCB_RX_CONFIG].prio_type = 86 if (dst->path[rx].prio_type != src->path[rx].prio_type) {
74 src_tc_cfg->path[DCB_RX_CONFIG].prio_type; 87 dst->path[rx].prio_type = src->path[rx].prio_type;
88 changes |= BIT_PG_RX;
89 }
75 90
76 dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id = 91 if (dst->path[rx].bwg_id != src->path[rx].bwg_id) {
77 src_tc_cfg->path[DCB_RX_CONFIG].bwg_id; 92 dst->path[rx].bwg_id = src->path[rx].bwg_id;
93 changes |= BIT_PG_RX;
94 }
78 95
79 dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent = 96 if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) {
80 src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent; 97 dst->path[rx].bwg_percent = src->path[rx].bwg_percent;
98 changes |= BIT_PG_RX;
99 }
81 100
82 dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap = 101 if (dst->path[rx].up_to_tc_bitmap !=
83 src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap; 102 src->path[rx].up_to_tc_bitmap) {
103 dst->path[rx].up_to_tc_bitmap =
104 src->path[rx].up_to_tc_bitmap;
105 changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG);
106 }
84 } 107 }
85 108
86 for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { 109 for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
87 dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG] 110 j = i - DCB_PG_ATTR_BW_ID_0;
88 [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage 111 if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) {
89 [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; 112 dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j];
90 dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG] 113 changes |= BIT_PG_TX;
91 [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage 114 }
92 [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; 115 if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) {
116 dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j];
117 changes |= BIT_PG_RX;
118 }
93 } 119 }
94 120
95 for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { 121 for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
96 dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc = 122 j = i - DCB_PFC_UP_ATTR_0;
97 src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc; 123 if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) {
124 dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc;
125 changes |= BIT_PFC;
126 }
98 } 127 }
99 128
100 dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable; 129 if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) {
130 dcfg->pfc_mode_enable = scfg->pfc_mode_enable;
131 changes |= BIT_PFC;
132 }
101 133
102 return 0; 134 return changes;
103} 135}
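
ixgbe_copy_dcb_cfg() now compares every field before copying and hands back a bitmask of what really changed, letting ixgbe_dcbnl_set_all() return early when the CEE configuration is untouched. The sketch below shows the same copy-and-report shape on a made-up two-field config; the struct and the CHG_* names are illustrative only.

#include <stdio.h>

#define CHG_BW	0x1	/* bandwidth changed */
#define CHG_PFC	0x2	/* PFC setting changed */

struct cfg {
	int bw_percent;
	int pfc_enable;
};

/* Copy src into dst, reporting which groups of fields really changed. */
static int copy_cfg(const struct cfg *src, struct cfg *dst)
{
	int changes = 0;

	if (dst->bw_percent != src->bw_percent) {
		dst->bw_percent = src->bw_percent;
		changes |= CHG_BW;
	}
	if (dst->pfc_enable != src->pfc_enable) {
		dst->pfc_enable = src->pfc_enable;
		changes |= CHG_PFC;
	}
	return changes;
}

int main(void)
{
	struct cfg hw  = { .bw_percent = 50, .pfc_enable = 0 };
	struct cfg tmp = { .bw_percent = 80, .pfc_enable = 0 };
	int changes = copy_cfg(&tmp, &hw);

	if (!changes)
		printf("nothing to apply\n");
	else
		printf("apply mask 0x%x to hardware\n", changes);
	return 0;
}
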
104 136
105static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) 137static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
@@ -179,20 +211,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
179 if (up_map != DCB_ATTR_VALUE_UNDEFINED) 211 if (up_map != DCB_ATTR_VALUE_UNDEFINED)
180 adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = 212 adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
181 up_map; 213 up_map;
182
183 if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
184 adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
185 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
186 adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
187 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
188 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
189 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
190 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
191 adapter->dcb_set_bitmap |= BIT_PG_TX;
192
193 if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
194 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
195 adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
196} 214}
197 215
198static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, 216static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -201,10 +219,6 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
201 struct ixgbe_adapter *adapter = netdev_priv(netdev); 219 struct ixgbe_adapter *adapter = netdev_priv(netdev);
202 220
203 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; 221 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
204
205 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
206 adapter->dcb_cfg.bw_percentage[0][bwg_id])
207 adapter->dcb_set_bitmap |= BIT_PG_TX;
208} 222}
209 223
210static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, 224static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -223,20 +237,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
223 if (up_map != DCB_ATTR_VALUE_UNDEFINED) 237 if (up_map != DCB_ATTR_VALUE_UNDEFINED)
224 adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = 238 adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
225 up_map; 239 up_map;
226
227 if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
228 adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
229 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
230 adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
231 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
232 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
233 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
234 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
235 adapter->dcb_set_bitmap |= BIT_PG_RX;
236
237 if (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
238 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)
239 adapter->dcb_set_bitmap |= BIT_PFC;
240} 240}
241 241
242static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, 242static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -245,10 +245,6 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
245 struct ixgbe_adapter *adapter = netdev_priv(netdev); 245 struct ixgbe_adapter *adapter = netdev_priv(netdev);
246 246
247 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; 247 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
248
249 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
250 adapter->dcb_cfg.bw_percentage[1][bwg_id])
251 adapter->dcb_set_bitmap |= BIT_PG_RX;
252} 248}
253 249
254static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, 250static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -298,10 +294,8 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
298 294
299 adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; 295 adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
300 if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != 296 if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
301 adapter->dcb_cfg.tc_config[priority].dcb_pfc) { 297 adapter->dcb_cfg.tc_config[priority].dcb_pfc)
302 adapter->dcb_set_bitmap |= BIT_PFC;
303 adapter->temp_dcb_cfg.pfc_mode_enable = true; 298 adapter->temp_dcb_cfg.pfc_mode_enable = true;
304 }
305} 299}
306 300
307static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, 301static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -336,7 +330,8 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
336static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) 330static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
337{ 331{
338 struct ixgbe_adapter *adapter = netdev_priv(netdev); 332 struct ixgbe_adapter *adapter = netdev_priv(netdev);
339 int ret, i; 333 int ret = DCB_NO_HW_CHG;
334 int i;
340#ifdef IXGBE_FCOE 335#ifdef IXGBE_FCOE
341 struct dcb_app app = { 336 struct dcb_app app = {
342 .selector = DCB_APP_IDTYPE_ETHTYPE, 337 .selector = DCB_APP_IDTYPE_ETHTYPE,
@@ -355,12 +350,13 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
355 350
356 /* Fail command if not in CEE mode */ 351 /* Fail command if not in CEE mode */
357 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 352 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
358 return 1; 353 return ret;
359 354
360 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, 355 adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
361 MAX_TRAFFIC_CLASS); 356 &adapter->dcb_cfg,
362 if (ret) 357 MAX_TRAFFIC_CLASS);
363 return DCB_NO_HW_CHG; 358 if (!adapter->dcb_set_bitmap)
359 return ret;
364 360
365 if (adapter->dcb_cfg.pfc_mode_enable) { 361 if (adapter->dcb_cfg.pfc_mode_enable) {
366 switch (adapter->hw.mac.type) { 362 switch (adapter->hw.mac.type) {
@@ -420,6 +416,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
420 416
421 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 417 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
422 netdev_set_prio_tc_map(netdev, i, prio_tc[i]); 418 netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
419
420 ret = DCB_HW_CHG_RST;
423 } 421 }
424 422
425 if (adapter->dcb_set_bitmap & BIT_PFC) { 423 if (adapter->dcb_set_bitmap & BIT_PFC) {
@@ -430,7 +428,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
430 DCB_TX_CONFIG, prio_tc); 428 DCB_TX_CONFIG, prio_tc);
431 ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en); 429 ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
432 ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc); 430 ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
433 ret = DCB_HW_CHG; 431 if (ret != DCB_HW_CHG_RST)
432 ret = DCB_HW_CHG;
434 } 433 }
435 434
436 if (adapter->dcb_cfg.pfc_mode_enable) 435 if (adapter->dcb_cfg.pfc_mode_enable)
@@ -531,9 +530,6 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
531 struct ixgbe_adapter *adapter = netdev_priv(netdev); 530 struct ixgbe_adapter *adapter = netdev_priv(netdev);
532 531
533 adapter->temp_dcb_cfg.pfc_mode_enable = state; 532 adapter->temp_dcb_cfg.pfc_mode_enable = state;
534 if (adapter->temp_dcb_cfg.pfc_mode_enable !=
535 adapter->dcb_cfg.pfc_mode_enable)
536 adapter->dcb_set_bitmap |= BIT_PFC;
537} 533}
538 534
539/** 535/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 027d7a75be39..ed1b47dc0834 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -622,6 +622,16 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
622 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 622 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
623 set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); 623 set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
624 624
625#ifdef IXGBE_FCOE
626 if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
627 struct ixgbe_ring_feature *f;
628 f = &adapter->ring_feature[RING_F_FCOE];
629 if ((rxr_idx >= f->mask) &&
630 (rxr_idx < f->mask + f->indices))
631 set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
632 }
633
634#endif /* IXGBE_FCOE */
625 /* apply Rx specific ring traits */ 635 /* apply Rx specific ring traits */
626 ring->count = adapter->rx_ring_count; 636 ring->count = adapter->rx_ring_count;
627 ring->queue_index = rxr_idx; 637 ring->queue_index = rxr_idx;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 398fc223cab9..a7f3cd872caf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,8 +63,8 @@ static char ixgbe_default_device_descr[] =
63 "Intel(R) 10 Gigabit Network Connection"; 63 "Intel(R) 10 Gigabit Network Connection";
64#endif 64#endif
65#define MAJ 3 65#define MAJ 3
66#define MIN 6 66#define MIN 8
67#define BUILD 7 67#define BUILD 21
68#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 68#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
69 __stringify(BUILD) "-k" 69 __stringify(BUILD) "-k"
70const char ixgbe_driver_version[] = DRV_VERSION; 70const char ixgbe_driver_version[] = DRV_VERSION;
@@ -141,13 +141,16 @@ module_param(allow_unsupported_sfp, uint, 0);
141MODULE_PARM_DESC(allow_unsupported_sfp, 141MODULE_PARM_DESC(allow_unsupported_sfp,
142 "Allow unsupported and untested SFP+ modules on 82599-based adapters"); 142 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
143 143
144#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
145static int debug = -1;
146module_param(debug, int, 0);
147MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
148
144MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 149MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
145MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); 150MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
146MODULE_LICENSE("GPL"); 151MODULE_LICENSE("GPL");
147MODULE_VERSION(DRV_VERSION); 152MODULE_VERSION(DRV_VERSION);
148 153
149#define DEFAULT_DEBUG_LEVEL_SHIFT 3
150
151static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) 154static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
152{ 155{
153 if (!test_bit(__IXGBE_DOWN, &adapter->state) && 156 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -3151,14 +3154,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3151 set_ring_rsc_enabled(rx_ring); 3154 set_ring_rsc_enabled(rx_ring);
3152 else 3155 else
3153 clear_ring_rsc_enabled(rx_ring); 3156 clear_ring_rsc_enabled(rx_ring);
3154#ifdef IXGBE_FCOE
3155 if (netdev->features & NETIF_F_FCOE_MTU) {
3156 struct ixgbe_ring_feature *f;
3157 f = &adapter->ring_feature[RING_F_FCOE];
3158 if ((i >= f->mask) && (i < f->mask + f->indices))
3159 set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state);
3160 }
3161#endif /* IXGBE_FCOE */
3162 } 3157 }
3163} 3158}
3164 3159
@@ -4833,7 +4828,9 @@ static int ixgbe_resume(struct pci_dev *pdev)
4833 4828
4834 pci_wake_from_d3(pdev, false); 4829 pci_wake_from_d3(pdev, false);
4835 4830
4831 rtnl_lock();
4836 err = ixgbe_init_interrupt_scheme(adapter); 4832 err = ixgbe_init_interrupt_scheme(adapter);
4833 rtnl_unlock();
4837 if (err) { 4834 if (err) {
4838 e_dev_err("Cannot initialize interrupts for device\n"); 4835 e_dev_err("Cannot initialize interrupts for device\n");
4839 return err; 4836 return err;
@@ -4890,6 +4887,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
4890 if (wufc) { 4887 if (wufc) {
4891 ixgbe_set_rx_mode(netdev); 4888 ixgbe_set_rx_mode(netdev);
4892 4889
4890 /*
4891 * enable the optics for both mult-speed fiber and
4892 * 82599 SFP+ fiber as we can WoL.
4893 */
4894 if (hw->mac.ops.enable_tx_laser &&
4895 (hw->phy.multispeed_fiber ||
4896 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
4897 hw->mac.type == ixgbe_mac_82599EB)))
4898 hw->mac.ops.enable_tx_laser(hw);
4899
4893 /* turn on all-multi mode if wake on multicast is enabled */ 4900 /* turn on all-multi mode if wake on multicast is enabled */
4894 if (wufc & IXGBE_WUFC_MC) { 4901 if (wufc & IXGBE_WUFC_MC) {
4895 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4902 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -6834,7 +6841,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6834 adapter->pdev = pdev; 6841 adapter->pdev = pdev;
6835 hw = &adapter->hw; 6842 hw = &adapter->hw;
6836 hw->back = adapter; 6843 hw->back = adapter;
6837 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 6844 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6838 6845
6839 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 6846 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
6840 pci_resource_len(pdev, 0)); 6847 pci_resource_len(pdev, 0));
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 581c65976bb4..307611ae831d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -91,7 +91,10 @@ MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
91MODULE_LICENSE("GPL"); 91MODULE_LICENSE("GPL");
92MODULE_VERSION(DRV_VERSION); 92MODULE_VERSION(DRV_VERSION);
93 93
94#define DEFAULT_DEBUG_LEVEL_SHIFT 3 94#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
95static int debug = -1;
96module_param(debug, int, 0);
97MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
95 98
96/* forward decls */ 99/* forward decls */
97static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector); 100static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
@@ -3367,7 +3370,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3367 adapter->pdev = pdev; 3370 adapter->pdev = pdev;
3368 hw = &adapter->hw; 3371 hw = &adapter->hw;
3369 hw->back = adapter; 3372 hw->back = adapter;
3370 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3373 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3371 3374
3372 /* 3375 /*
3373 * call save state here in standalone driver because it relies on 3376 * call save state here in standalone driver because it relies on
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 423a1a2a702e..c9b504e2dfc3 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1767,13 +1767,14 @@ static int sky2_open(struct net_device *dev)
1767 1767
1768 sky2_hw_up(sky2); 1768 sky2_hw_up(sky2);
1769 1769
1770 /* Enable interrupts from phy/mac for port */
1771 imask = sky2_read32(hw, B0_IMSK);
1772
1770 if (hw->chip_id == CHIP_ID_YUKON_OPT || 1773 if (hw->chip_id == CHIP_ID_YUKON_OPT ||
1771 hw->chip_id == CHIP_ID_YUKON_PRM || 1774 hw->chip_id == CHIP_ID_YUKON_PRM ||
1772 hw->chip_id == CHIP_ID_YUKON_OP_2) 1775 hw->chip_id == CHIP_ID_YUKON_OP_2)
1773 imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */ 1776 imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */
1774 1777
1775 /* Enable interrupts from phy/mac for port */
1776 imask = sky2_read32(hw, B0_IMSK);
1777 imask |= portirq_msk[port]; 1778 imask |= portirq_msk[port];
1778 sky2_write32(hw, B0_IMSK, imask); 1779 sky2_write32(hw, B0_IMSK, imask);
1779 sky2_read32(hw, B0_IMSK); 1780 sky2_read32(hw, B0_IMSK);
@@ -2468,6 +2469,17 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2468 return err; 2469 return err;
2469} 2470}
2470 2471
2472static inline bool needs_copy(const struct rx_ring_info *re,
2473 unsigned length)
2474{
2475#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2476 /* Some architectures need the IP header to be aligned */
2477 if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32)))
2478 return true;
2479#endif
2480 return length < copybreak;
2481}
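
needs_copy() above forces the copy path whenever re->data_addr plus the 14-byte Ethernet header would leave the IP header unaligned on architectures without efficient unaligned access, on top of the existing copybreak size test. A sketch of that check, with IS_ALIGNED() written out the way the kernel macro behaves:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define ETH_HLEN 14
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static unsigned int copybreak = 128;

/* Copy small frames, and any frame whose IP header would land on an
 * unaligned address after the Ethernet header. */
static bool needs_copy(uint64_t data_addr, unsigned int length)
{
	if (!IS_ALIGNED(data_addr + ETH_HLEN, sizeof(uint32_t)))
		return true;
	return length < copybreak;
}

int main(void)
{
	printf("addr 0x1000, len 1500 -> %s\n",
	       needs_copy(0x1000, 1500) ? "copy" : "reuse");
	printf("addr 0x1002, len 1500 -> %s\n",
	       needs_copy(0x1002, 1500) ? "copy" : "reuse");
	return 0;
}
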
2482
2471/* For small just reuse existing skb for next receive */ 2483/* For small just reuse existing skb for next receive */
2472static struct sk_buff *receive_copy(struct sky2_port *sky2, 2484static struct sk_buff *receive_copy(struct sky2_port *sky2,
2473 const struct rx_ring_info *re, 2485 const struct rx_ring_info *re,
@@ -2598,7 +2610,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2598 goto error; 2610 goto error;
2599 2611
2600okay: 2612okay:
2601 if (length < copybreak) 2613 if (needs_copy(re, length))
2602 skb = receive_copy(sky2, re, length); 2614 skb = receive_copy(sky2, re, length);
2603 else 2615 else
2604 skb = receive_new(sky2, re, length); 2616 skb = receive_new(sky2, re, length);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9e2b911a1230..d69fee41f24a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -83,8 +83,9 @@
83 83
84#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ) 84#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
85 85
86#define MLX4_EN_ALLOC_ORDER 2 86/* Use the maximum between 16384 and a single page */
87#define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER) 87#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)
88#define MLX4_EN_ALLOC_ORDER get_order(MLX4_EN_ALLOC_SIZE)
88 89
89#define MLX4_EN_MAX_LRO_DESCRIPTORS 32 90#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
90 91
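
The mlx4_en change defines the receive allocation size first (16384 rounded up to a whole page) and derives the order from it, instead of hard-coding order 2; on a 64 KiB-page system that is one page instead of four. A sketch of the arithmetic, assuming the usual definitions of PAGE_ALIGN() and get_order():

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* try 65536UL to model a 64 KiB-page arch */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* get_order(): smallest n such that PAGE_SIZE << n covers size */
static int get_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long alloc_size = PAGE_ALIGN(16384);

	printf("PAGE_SIZE=%lu: alloc size %lu, order %d\n",
	       PAGE_SIZE, alloc_size, get_order(alloc_size));
	return 0;
}

With 4 KiB pages this still yields order 2, matching the old constant; with 64 KiB pages it drops to order 0.
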
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index c722aa607d07..f8dda009d3c0 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -889,16 +889,17 @@ static int ks8851_net_stop(struct net_device *dev)
889 netif_stop_queue(dev); 889 netif_stop_queue(dev);
890 890
891 mutex_lock(&ks->lock); 891 mutex_lock(&ks->lock);
892 /* turn off the IRQs and ack any outstanding */
893 ks8851_wrreg16(ks, KS_IER, 0x0000);
894 ks8851_wrreg16(ks, KS_ISR, 0xffff);
895 mutex_unlock(&ks->lock);
892 896
893 /* stop any outstanding work */ 897 /* stop any outstanding work */
894 flush_work(&ks->irq_work); 898 flush_work(&ks->irq_work);
895 flush_work(&ks->tx_work); 899 flush_work(&ks->tx_work);
896 flush_work(&ks->rxctrl_work); 900 flush_work(&ks->rxctrl_work);
897 901
898 /* turn off the IRQs and ack any outstanding */ 902 mutex_lock(&ks->lock);
899 ks8851_wrreg16(ks, KS_IER, 0x0000);
900 ks8851_wrreg16(ks, KS_ISR, 0xffff);
901
902 /* shutdown RX process */ 903 /* shutdown RX process */
903 ks8851_wrreg16(ks, KS_RXCR1, 0x0000); 904 ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
904 905
@@ -907,6 +908,7 @@ static int ks8851_net_stop(struct net_device *dev)
907 908
908 /* set powermode to soft power down to save power */ 909 /* set powermode to soft power down to save power */
909 ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); 910 ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
911 mutex_unlock(&ks->lock);
910 912
911 /* ensure any queued tx buffers are dumped */ 913 /* ensure any queued tx buffers are dumped */
912 while (!skb_queue_empty(&ks->txq)) { 914 while (!skb_queue_empty(&ks->txq)) {
@@ -918,7 +920,6 @@ static int ks8851_net_stop(struct net_device *dev)
918 dev_kfree_skb(txb); 920 dev_kfree_skb(txb);
919 } 921 }
920 922
921 mutex_unlock(&ks->lock);
922 return 0; 923 return 0;
923} 924}
924 925
@@ -1418,6 +1419,7 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1418 struct net_device *ndev; 1419 struct net_device *ndev;
1419 struct ks8851_net *ks; 1420 struct ks8851_net *ks;
1420 int ret; 1421 int ret;
1422 unsigned cider;
1421 1423
1422 ndev = alloc_etherdev(sizeof(struct ks8851_net)); 1424 ndev = alloc_etherdev(sizeof(struct ks8851_net));
1423 if (!ndev) 1425 if (!ndev)
@@ -1484,8 +1486,8 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1484 ks8851_soft_reset(ks, GRR_GSR); 1486 ks8851_soft_reset(ks, GRR_GSR);
1485 1487
1486 /* simple check for a valid chip being connected to the bus */ 1488 /* simple check for a valid chip being connected to the bus */
1487 1489 cider = ks8851_rdreg16(ks, KS_CIDER);
1488 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { 1490 if ((cider & ~CIDER_REV_MASK) != CIDER_ID) {
1489 dev_err(&spi->dev, "failed to read device ID\n"); 1491 dev_err(&spi->dev, "failed to read device ID\n");
1490 ret = -ENODEV; 1492 ret = -ENODEV;
1491 goto err_id; 1493 goto err_id;
@@ -1516,15 +1518,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1516 } 1518 }
1517 1519
1518 netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", 1520 netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
1519 CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), 1521 CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq,
1520 ndev->dev_addr, ndev->irq,
1521 ks->rc_ccr & CCR_EEPROM ? "has" : "no"); 1522 ks->rc_ccr & CCR_EEPROM ? "has" : "no");
1522 1523
1523 return 0; 1524 return 0;
1524 1525
1525 1526
1526err_netdev: 1527err_netdev:
1527 free_irq(ndev->irq, ndev); 1528 free_irq(ndev->irq, ks);
1528 1529
1529err_id: 1530err_id:
1530err_irq: 1531err_irq:
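In the ks8851_net_stop() hunk the interrupt mask/ack writes move ahead of the flush_work() calls and out of one long ks->lock region: the IRQ source is silenced first so no new work can be queued, the work items (which appear to take the same mutex) are flushed with the lock dropped, and the lock is re-taken only for the final RX shutdown and power-down writes. A plain-pthreads analogy of that shape (assumed names, not driver code):

/* Toy analogy with pthreads, not the driver: never wait for a worker while
 * holding the lock the worker itself needs; quiesce, drop the lock, wait,
 * then re-take the lock for the last hardware writes. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *irq_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);	/* the work handler uses the same lock */
	puts("irq_work: servicing the chip");
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, irq_work, NULL);

	pthread_mutex_lock(&lock);
	puts("stop: mask and ack interrupts");	/* nothing new gets queued now */
	pthread_mutex_unlock(&lock);

	pthread_join(worker, NULL);		/* flush_work() analogue, lock not held */

	pthread_mutex_lock(&lock);
	puts("stop: shut down RX, enter soft power-down");
	pthread_mutex_unlock(&lock);
	return 0;
}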
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index b8104d9f4081..5ffde23ac8fb 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -40,7 +40,7 @@
40#define DRV_NAME "ks8851_mll" 40#define DRV_NAME "ks8851_mll"
41 41
42static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; 42static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
43#define MAX_RECV_FRAMES 32 43#define MAX_RECV_FRAMES 255
44#define MAX_BUF_SIZE 2048 44#define MAX_BUF_SIZE 2048
45#define TX_BUF_SIZE 2000 45#define TX_BUF_SIZE 2000
46#define RX_BUF_SIZE 2000 46#define RX_BUF_SIZE 2000
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ef723b185d85..eaf9ff0262a9 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5675,7 +5675,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
5675 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); 5675 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
5676 } 5676 }
5677 5677
5678 memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN); 5678 memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
5679 5679
5680 interrupt = hw_block_intr(hw); 5680 interrupt = hw_block_intr(hw);
5681 5681
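The one-line ksz884x change bounds the copy by ETH_ALEN rather than MAX_ADDR_LEN; struct sockaddr only carries 14 bytes of sa_data, so a 32-byte copy reads well past the address handed in. A hypothetical sketch of the sizes involved (names are illustrative, not the driver's):

/* Illustrative only: the sockaddr passed to ndo_set_mac_address provides at
 * most 14 bytes of sa_data, so copying MAX_ADDR_LEN (32) bytes overruns the
 * source; ETH_ALEN (6) covers the full Ethernet address. */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6
#define MAX_ADDR_LEN	32

struct sockaddr_like {
	unsigned short	sa_family;
	char		sa_data[14];
};

int main(void)
{
	struct sockaddr_like mac = { .sa_data = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 } };
	unsigned char dev_addr[MAX_ADDR_LEN] = { 0 };

	memcpy(dev_addr, mac.sa_data, ETH_ALEN);	/* bounded by the real address size */

	printf("copied %d of %d available source bytes\n",
	       ETH_ALEN, (int)sizeof(mac.sa_data));
	return 0;
}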
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 69444247c20b..6dfc26d85e47 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1441,7 +1441,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1441 } 1441 }
1442#endif 1442#endif
1443 if (!is_valid_ether_addr(ndev->dev_addr)) 1443 if (!is_valid_ether_addr(ndev->dev_addr))
1444 dev_hw_addr_random(ndev, ndev->dev_addr); 1444 eth_hw_addr_random(ndev);
1445 1445
1446 /* Reset the ethernet controller */ 1446 /* Reset the ethernet controller */
1447 __lpc_eth_reset(pldat); 1447 __lpc_eth_reset(pldat);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index abc79076f867..b3287c0fe279 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -958,6 +958,11 @@ static inline void cp_start_hw (struct cp_private *cp)
958 cpw8(Cmd, RxOn | TxOn); 958 cpw8(Cmd, RxOn | TxOn);
959} 959}
960 960
961static void cp_enable_irq(struct cp_private *cp)
962{
963 cpw16_f(IntrMask, cp_intr_mask);
964}
965
961static void cp_init_hw (struct cp_private *cp) 966static void cp_init_hw (struct cp_private *cp)
962{ 967{
963 struct net_device *dev = cp->dev; 968 struct net_device *dev = cp->dev;
@@ -997,8 +1002,6 @@ static void cp_init_hw (struct cp_private *cp)
997 1002
998 cpw16(MultiIntr, 0); 1003 cpw16(MultiIntr, 0);
999 1004
1000 cpw16_f(IntrMask, cp_intr_mask);
1001
1002 cpw8_f(Cfg9346, Cfg9346_Lock); 1005 cpw8_f(Cfg9346, Cfg9346_Lock);
1003} 1006}
1004 1007
@@ -1130,6 +1133,8 @@ static int cp_open (struct net_device *dev)
1130 if (rc) 1133 if (rc)
1131 goto err_out_hw; 1134 goto err_out_hw;
1132 1135
1136 cp_enable_irq(cp);
1137
1133 netif_carrier_off(dev); 1138 netif_carrier_off(dev);
1134 mii_check_media(&cp->mii_if, netif_msg_link(cp), true); 1139 mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1135 netif_start_queue(dev); 1140 netif_start_queue(dev);
@@ -2031,6 +2036,7 @@ static int cp_resume (struct pci_dev *pdev)
2031 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ 2036 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2032 cp_init_rings_index (cp); 2037 cp_init_rings_index (cp);
2033 cp_init_hw (cp); 2038 cp_init_hw (cp);
2039 cp_enable_irq(cp);
2034 netif_start_queue (dev); 2040 netif_start_queue (dev);
2035 2041
2036 spin_lock_irqsave (&cp->lock, flags); 2042 spin_lock_irqsave (&cp->lock, flags);
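The 8139cp hunks pull the IntrMask write out of cp_init_hw() into a new cp_enable_irq() helper, called from cp_open() only after the rest of the setup (apparently including IRQ registration) succeeds, and again in cp_resume() after the rings and hardware are re-initialised. A toy model of why that ordering matters (just the shape, not the driver):

/* Hedged toy model, not the driver: if the mask is opened before a handler
 * is installed, a stray interrupt has nothing to call. */
#include <stdio.h>

static void (*handler)(void);		/* NULL until the "request_irq" step */
static int unmasked;			/* stands in for the IntrMask register */

static void device_raises_interrupt(void)
{
	if (unmasked) {
		if (handler)
			handler();
		else
			puts("interrupt with no handler installed!");
	}
}

static void real_handler(void)
{
	puts("interrupt serviced");
}

int main(void)
{
	/* order after the patch: init hw (mask closed), register, then unmask */
	device_raises_interrupt();	/* masked: ignored */
	handler = real_handler;		/* request_irq() analogue */
	unmasked = 1;			/* cp_enable_irq() analogue */
	device_raises_interrupt();	/* now serviced */
	return 0;
}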
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7b23554f80b6..f54509377efa 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5810,7 +5810,10 @@ static void __rtl8169_resume(struct net_device *dev)
5810 5810
5811 rtl_pll_power_up(tp); 5811 rtl_pll_power_up(tp);
5812 5812
5813 rtl_lock_work(tp);
5814 napi_enable(&tp->napi);
5813 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); 5815 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
5816 rtl_unlock_work(tp);
5814 5817
5815 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); 5818 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
5816} 5819}
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 9755b49bbefb..3fb2355af37e 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -7,7 +7,8 @@ config SH_ETH
7 depends on SUPERH && \ 7 depends on SUPERH && \
8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ 8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ 9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7757) 10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
11 CPU_SUBTYPE_SH7757)
11 select CRC32 12 select CRC32
12 select NET_CORE 13 select NET_CORE
13 select MII 14 select MII
@@ -16,4 +17,4 @@ config SH_ETH
16 ---help--- 17 ---help---
17 Renesas SuperH Ethernet device driver. 18 Renesas SuperH Ethernet device driver.
18 This driver supporting CPUs are: 19 This driver supporting CPUs are:
19 - SH7710, SH7712, SH7763, SH7619, SH7724, and SH7757. 20 - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763 and SH7757.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 8615961c1287..d63e09b29a96 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * SuperH Ethernet device driver 2 * SuperH Ethernet device driver
3 * 3 *
4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu 4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5 * Copyright (C) 2008-2009 Renesas Solutions Corp. 5 * Copyright (C) 2008-2012 Renesas Solutions Corp.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License, 8 * under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/ethtool.h> 39#include <linux/ethtool.h>
40#include <linux/if_vlan.h> 40#include <linux/if_vlan.h>
41#include <linux/clk.h>
41#include <linux/sh_eth.h> 42#include <linux/sh_eth.h>
42 43
43#include "sh_eth.h" 44#include "sh_eth.h"
@@ -279,8 +280,9 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
279 return &sh_eth_my_cpu_data; 280 return &sh_eth_my_cpu_data;
280} 281}
281 282
282#elif defined(CONFIG_CPU_SUBTYPE_SH7763) 283#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
283#define SH_ETH_HAS_TSU 1 284#define SH_ETH_HAS_TSU 1
285static void sh_eth_reset_hw_crc(struct net_device *ndev);
284static void sh_eth_chip_reset(struct net_device *ndev) 286static void sh_eth_chip_reset(struct net_device *ndev)
285{ 287{
286 struct sh_eth_private *mdp = netdev_priv(ndev); 288 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -314,6 +316,9 @@ static void sh_eth_reset(struct net_device *ndev)
314 sh_eth_write(ndev, 0x0, RDFAR); 316 sh_eth_write(ndev, 0x0, RDFAR);
315 sh_eth_write(ndev, 0x0, RDFXR); 317 sh_eth_write(ndev, 0x0, RDFXR);
316 sh_eth_write(ndev, 0x0, RDFFR); 318 sh_eth_write(ndev, 0x0, RDFFR);
319
320 /* Reset HW CRC register */
321 sh_eth_reset_hw_crc(ndev);
317} 322}
318 323
319static void sh_eth_set_duplex(struct net_device *ndev) 324static void sh_eth_set_duplex(struct net_device *ndev)
@@ -370,8 +375,17 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
370 .no_trimd = 1, 375 .no_trimd = 1,
371 .no_ade = 1, 376 .no_ade = 1,
372 .tsu = 1, 377 .tsu = 1,
378#if defined(CONFIG_CPU_SUBTYPE_SH7734)
379 .hw_crc = 1,
380#endif
373}; 381};
374 382
383static void sh_eth_reset_hw_crc(struct net_device *ndev)
384{
385 if (sh_eth_my_cpu_data.hw_crc)
386 sh_eth_write(ndev, 0x0, CSMR);
387}
388
375#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 389#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
376#define SH_ETH_RESET_DEFAULT 1 390#define SH_ETH_RESET_DEFAULT 1
377static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 391static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
@@ -790,7 +804,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
790 /* all sh_eth int mask */ 804 /* all sh_eth int mask */
791 sh_eth_write(ndev, 0, EESIPR); 805 sh_eth_write(ndev, 0, EESIPR);
792 806
793#if defined(__LITTLE_ENDIAN__) 807#if defined(__LITTLE_ENDIAN)
794 if (mdp->cd->hw_swap) 808 if (mdp->cd->hw_swap)
795 sh_eth_write(ndev, EDMR_EL, EDMR); 809 sh_eth_write(ndev, EDMR_EL, EDMR);
796 else 810 else
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 57dc26261116..0fa14afce23d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * SuperH Ethernet device driver 2 * SuperH Ethernet device driver
3 * 3 *
4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu 4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5 * Copyright (C) 2008-2011 Renesas Solutions Corp. 5 * Copyright (C) 2008-2012 Renesas Solutions Corp.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License, 8 * under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,8 @@ enum {
98 CEECR, 98 CEECR,
99 MAFCR, 99 MAFCR,
100 RTRATE, 100 RTRATE,
101 CSMR,
102 RMII_MII,
101 103
102 /* TSU Absolute address */ 104 /* TSU Absolute address */
103 ARSTR, 105 ARSTR,
@@ -172,6 +174,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
172 [RMCR] = 0x0458, 174 [RMCR] = 0x0458,
173 [RPADIR] = 0x0460, 175 [RPADIR] = 0x0460,
174 [FCFTR] = 0x0468, 176 [FCFTR] = 0x0468,
177 [CSMR] = 0x04E4,
175 178
176 [ECMR] = 0x0500, 179 [ECMR] = 0x0500,
177 [ECSR] = 0x0510, 180 [ECSR] = 0x0510,
@@ -200,6 +203,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
200 [CERCR] = 0x0768, 203 [CERCR] = 0x0768,
201 [CEECR] = 0x0770, 204 [CEECR] = 0x0770,
202 [MAFCR] = 0x0778, 205 [MAFCR] = 0x0778,
206 [RMII_MII] = 0x0790,
203 207
204 [ARSTR] = 0x0000, 208 [ARSTR] = 0x0000,
205 [TSU_CTRST] = 0x0004, 209 [TSU_CTRST] = 0x0004,
@@ -377,7 +381,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
377/* 381/*
378 * Register's bits 382 * Register's bits
379 */ 383 */
380#ifdef CONFIG_CPU_SUBTYPE_SH7763 384#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
381/* EDSR */ 385/* EDSR */
382enum EDSR_BIT { 386enum EDSR_BIT {
383 EDSR_ENT = 0x01, EDSR_ENR = 0x02, 387 EDSR_ENT = 0x01, EDSR_ENR = 0x02,
@@ -689,7 +693,7 @@ enum TSU_FWSLC_BIT {
689 */ 693 */
690struct sh_eth_txdesc { 694struct sh_eth_txdesc {
691 u32 status; /* TD0 */ 695 u32 status; /* TD0 */
692#if defined(CONFIG_CPU_LITTLE_ENDIAN) 696#if defined(__LITTLE_ENDIAN)
693 u16 pad0; /* TD1 */ 697 u16 pad0; /* TD1 */
694 u16 buffer_length; /* TD1 */ 698 u16 buffer_length; /* TD1 */
695#else 699#else
@@ -706,7 +710,7 @@ struct sh_eth_txdesc {
706 */ 710 */
707struct sh_eth_rxdesc { 711struct sh_eth_rxdesc {
708 u32 status; /* RD0 */ 712 u32 status; /* RD0 */
709#if defined(CONFIG_CPU_LITTLE_ENDIAN) 713#if defined(__LITTLE_ENDIAN)
710 u16 frame_length; /* RD1 */ 714 u16 frame_length; /* RD1 */
711 u16 buffer_length; /* RD1 */ 715 u16 buffer_length; /* RD1 */
712#else 716#else
@@ -751,6 +755,7 @@ struct sh_eth_cpu_data {
751 unsigned rpadir:1; /* E-DMAC have RPADIR */ 755 unsigned rpadir:1; /* E-DMAC have RPADIR */
752 unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ 756 unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
753 unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ 757 unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
758 unsigned hw_crc:1; /* E-DMAC have CSMR */
754}; 759};
755 760
756struct sh_eth_private { 761struct sh_eth_private {
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 4a6971027076..cd3defb11ffb 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1166,10 +1166,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
1166 1166
1167/* Quickly dumps bad packets */ 1167/* Quickly dumps bad packets */
1168static void 1168static void
1169smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) 1169smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
1170{ 1170{
1171 unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
1172
1173 if (likely(pktwords >= 4)) { 1171 if (likely(pktwords >= 4)) {
1174 unsigned int timeout = 500; 1172 unsigned int timeout = 500;
1175 unsigned int val; 1173 unsigned int val;
@@ -1233,7 +1231,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1233 continue; 1231 continue;
1234 } 1232 }
1235 1233
1236 skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); 1234 skb = netdev_alloc_skb(dev, pktwords << 2);
1237 if (unlikely(!skb)) { 1235 if (unlikely(!skb)) {
1238 SMSC_WARN(pdata, rx_err, 1236 SMSC_WARN(pdata, rx_err,
1239 "Unable to allocate skb for rx packet"); 1237 "Unable to allocate skb for rx packet");
@@ -1243,14 +1241,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1243 break; 1241 break;
1244 } 1242 }
1245 1243
1246 skb->data = skb->head; 1244 pdata->ops->rx_readfifo(pdata,
1247 skb_reset_tail_pointer(skb); 1245 (unsigned int *)skb->data, pktwords);
1248 1246
1249 /* Align IP on 16B boundary */ 1247 /* Align IP on 16B boundary */
1250 skb_reserve(skb, NET_IP_ALIGN); 1248 skb_reserve(skb, NET_IP_ALIGN);
1251 skb_put(skb, pktlength - 4); 1249 skb_put(skb, pktlength - 4);
1252 pdata->ops->rx_readfifo(pdata,
1253 (unsigned int *)skb->head, pktwords);
1254 skb->protocol = eth_type_trans(skb, dev); 1250 skb->protocol = eth_type_trans(skb, dev);
1255 skb_checksum_none_assert(skb); 1251 skb_checksum_none_assert(skb);
1256 netif_receive_skb(skb); 1252 netif_receive_skb(skb);
@@ -1565,7 +1561,7 @@ static int smsc911x_open(struct net_device *dev)
1565 smsc911x_reg_write(pdata, FIFO_INT, temp); 1561 smsc911x_reg_write(pdata, FIFO_INT, temp);
1566 1562
1567 /* set RX Data offset to 2 bytes for alignment */ 1563 /* set RX Data offset to 2 bytes for alignment */
1568 smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); 1564 smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8));
1569 1565
1570 /* enable NAPI polling before enabling RX interrupts */ 1566 /* enable NAPI polling before enabling RX interrupts */
1571 napi_enable(&pdata->napi); 1567 napi_enable(&pdata->napi);
@@ -2382,7 +2378,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2382 SET_NETDEV_DEV(dev, &pdev->dev); 2378 SET_NETDEV_DEV(dev, &pdev->dev);
2383 2379
2384 pdata = netdev_priv(dev); 2380 pdata = netdev_priv(dev);
2385
2386 dev->irq = irq_res->start; 2381 dev->irq = irq_res->start;
2387 irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; 2382 irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
2388 pdata->ioaddr = ioremap_nocache(res->start, res_size); 2383 pdata->ioaddr = ioremap_nocache(res->start, res_size);
@@ -2446,7 +2441,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2446 if (retval) { 2441 if (retval) {
2447 SMSC_WARN(pdata, probe, 2442 SMSC_WARN(pdata, probe,
2448 "Unable to claim requested irq: %d", dev->irq); 2443 "Unable to claim requested irq: %d", dev->irq);
2449 goto out_free_irq; 2444 goto out_disable_resources;
2450 } 2445 }
2451 2446
2452 retval = register_netdev(dev); 2447 retval = register_netdev(dev);
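In the smsc911x polling path the skb is now sized by whole FIFO words (pktwords << 2) and the FIFO read lands at skb->data before the NET_IP_ALIGN reserve; with RX_CFG programmed for a NET_IP_ALIGN-byte RX data offset, the pad the chip prepends appears to be exactly what skb_reserve() then skips, leaving the IP header aligned. A small arithmetic sketch reusing the word-count formula visible in the removed fast-forward helper (the 60-byte frame is an assumed example):

/* Sketch of the buffer sizing, not driver code: pktwords covers the
 * NET_IP_ALIGN pad plus the frame rounded up to 32-bit FIFO words, and the
 * 14-byte Ethernet header starting 2 bytes in leaves the IP header on a
 * 4-byte boundary. */
#include <stdio.h>

#define NET_IP_ALIGN	2
#define ETH_HLEN	14

int main(void)
{
	unsigned int pktlength = 60;	/* example length from the RX status word */
	unsigned int pktwords = (pktlength + NET_IP_ALIGN + 3) >> 2;

	unsigned int skb_bytes = pktwords << 2;		/* netdev_alloc_skb() size */
	unsigned int ip_off = NET_IP_ALIGN + ETH_HLEN;	/* IP header offset in the buffer */

	printf("%u FIFO words read, %u-byte buffer, IP header at %u (mod 4 = %u)\n",
	       pktwords, skb_bytes, ip_off, ip_off % 4);
	return 0;
}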
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e85ffbd54830..48d56da62f08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1737,10 +1737,12 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1737 struct mac_device_info *mac; 1737 struct mac_device_info *mac;
1738 1738
1739 /* Identify the MAC HW device */ 1739 /* Identify the MAC HW device */
1740 if (priv->plat->has_gmac) 1740 if (priv->plat->has_gmac) {
1741 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1741 mac = dwmac1000_setup(priv->ioaddr); 1742 mac = dwmac1000_setup(priv->ioaddr);
1742 else 1743 } else {
1743 mac = dwmac100_setup(priv->ioaddr); 1744 mac = dwmac100_setup(priv->ioaddr);
1745 }
1744 if (!mac) 1746 if (!mac)
1745 return -ENOMEM; 1747 return -ENOMEM;
1746 1748
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 2757c7d6e633..e4e47088e26b 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
181 __davinci_mdio_reset(data); 181 __davinci_mdio_reset(data);
182 return -EAGAIN; 182 return -EAGAIN;
183 } 183 }
184
185 reg = __raw_readl(&regs->user[0].access);
186 if ((reg & USERACCESS_GO) == 0)
187 return 0;
188
184 dev_err(data->dev, "timed out waiting for user access\n"); 189 dev_err(data->dev, "timed out waiting for user access\n");
185 return -ETIMEDOUT; 190 return -ETIMEDOUT;
186} 191}
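The davinci_mdio change re-samples the GO bit one last time after the poll loop gives up, so a transaction that completes between the final poll and the timeout decision is reported as success instead of -ETIMEDOUT. A standalone sketch of that re-check-after-deadline pattern (simulated register, assumed names):

/* Simulated register poll, not the driver: the completion lands just after
 * the last in-loop read, and only the final re-check rescues it from being
 * reported as a timeout. */
#include <stdbool.h>
#include <stdio.h>

static int reads_until_clear = 5;	/* pretend GO clears on the 5th read */

static bool read_go_bit(void)
{
	return --reads_until_clear > 0;
}

static int wait_for_go_clear(int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (!read_go_bit())
			return 0;
		/* a delay/sleep between polls would sit here */
	}

	/* deadline reached: sample once more before declaring failure */
	if (!read_go_bit())
		return 0;

	return -1;	/* genuine timeout */
}

int main(void)
{
	printf("wait_for_go_clear(4) = %d\n", wait_for_go_clear(4));
	return 0;
}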
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 261356c2dc99..3d501ec7fad7 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -342,6 +342,21 @@ inline int __netio_fastio1(u32 fastio_index, u32 arg0)
342} 342}
343 343
344 344
345static void tile_net_return_credit(struct tile_net_cpu *info)
346{
347 struct tile_netio_queue *queue = &info->queue;
348 netio_queue_user_impl_t *qup = &queue->__user_part;
349
350 /* Return four credits after every fourth packet. */
351 if (--qup->__receive_credit_remaining == 0) {
352 u32 interval = qup->__receive_credit_interval;
353 qup->__receive_credit_remaining = interval;
354 __netio_fastio_return_credits(qup->__fastio_index, interval);
355 }
356}
357
358
359
345/* 360/*
346 * Provide a linux buffer to LIPP. 361 * Provide a linux buffer to LIPP.
347 */ 362 */
@@ -433,7 +448,7 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
433 struct sk_buff **skb_ptr; 448 struct sk_buff **skb_ptr;
434 449
435 /* Request 96 extra bytes for alignment purposes. */ 450 /* Request 96 extra bytes for alignment purposes. */
436 skb = netdev_alloc_skb(info->napi->dev, len + padding); 451 skb = netdev_alloc_skb(info->napi.dev, len + padding);
437 if (skb == NULL) 452 if (skb == NULL)
438 return false; 453 return false;
439 454
@@ -864,19 +879,11 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
864 879
865 stats->rx_packets++; 880 stats->rx_packets++;
866 stats->rx_bytes += len; 881 stats->rx_bytes += len;
867
868 if (small)
869 info->num_needed_small_buffers++;
870 else
871 info->num_needed_large_buffers++;
872 } 882 }
873 883
874 /* Return four credits after every fourth packet. */ 884 /* ISSUE: It would be nice to defer this until the packet has */
875 if (--qup->__receive_credit_remaining == 0) { 885 /* actually been processed. */
876 u32 interval = qup->__receive_credit_interval; 886 tile_net_return_credit(info);
877 qup->__receive_credit_remaining = interval;
878 __netio_fastio_return_credits(qup->__fastio_index, interval);
879 }
880 887
881 /* Consume this packet. */ 888 /* Consume this packet. */
882 qup->__packet_receive_read = index2; 889 qup->__packet_receive_read = index2;
@@ -1543,7 +1550,7 @@ static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
1543 1550
1544 /* Drain all the LIPP buffers. */ 1551 /* Drain all the LIPP buffers. */
1545 while (true) { 1552 while (true) {
1546 int buffer; 1553 unsigned int buffer;
1547 1554
1548 /* NOTE: This should never fail. */ 1555 /* NOTE: This should never fail. */
1549 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer, 1556 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
@@ -1707,7 +1714,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1707 if (!hash_default) { 1714 if (!hash_default) {
1708 void *va = pfn_to_kaddr(pfn) + f->page_offset; 1715 void *va = pfn_to_kaddr(pfn) + f->page_offset;
1709 BUG_ON(PageHighMem(skb_frag_page(f))); 1716 BUG_ON(PageHighMem(skb_frag_page(f)));
1710 finv_buffer_remote(va, f->size, 0); 1717 finv_buffer_remote(va, skb_frag_size(f), 0);
1711 } 1718 }
1712 1719
1713 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; 1720 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
@@ -1735,8 +1742,8 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1735 * Sometimes, if "sendfile()" requires copying, we will be called with 1742 * Sometimes, if "sendfile()" requires copying, we will be called with
1736 * "data" containing the header and payload, with "frags" being empty. 1743 * "data" containing the header and payload, with "frags" being empty.
1737 * 1744 *
1738 * In theory, "sh->nr_frags" could be 3, but in practice, it seems 1745 * Sometimes, for example when using NFS over TCP, a single segment can
1739 * that this will never actually happen. 1746 * span 3 fragments, which must be handled carefully in LEPP.
1740 * 1747 *
1741 * See "emulate_large_send_offload()" for some reference code, which 1748 * See "emulate_large_send_offload()" for some reference code, which
1742 * does not handle checksumming. 1749 * does not handle checksumming.
@@ -1844,10 +1851,8 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1844 1851
1845 spin_lock_irqsave(&priv->eq_lock, irqflags); 1852 spin_lock_irqsave(&priv->eq_lock, irqflags);
1846 1853
1847 /* 1854 /* Handle completions if needed to make room. */
1848 * Handle completions if needed to make room. 1855 /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
1849 * HACK: Spin until there is sufficient room.
1850 */
1851 if (lepp_num_free_comp_slots(eq) == 0) { 1856 if (lepp_num_free_comp_slots(eq) == 0) {
1852 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); 1857 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
1853 if (nolds == 0) { 1858 if (nolds == 0) {
@@ -1861,6 +1866,7 @@ busy:
1861 cmd_tail = eq->cmd_tail; 1866 cmd_tail = eq->cmd_tail;
1862 1867
1863 /* Prepare to advance, detecting full queue. */ 1868 /* Prepare to advance, detecting full queue. */
1869 /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
1864 cmd_next = cmd_tail + cmd_size; 1870 cmd_next = cmd_tail + cmd_size;
1865 if (cmd_tail < cmd_head && cmd_next >= cmd_head) 1871 if (cmd_tail < cmd_head && cmd_next >= cmd_head)
1866 goto busy; 1872 goto busy;
@@ -2023,10 +2029,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
2023 2029
2024 spin_lock_irqsave(&priv->eq_lock, irqflags); 2030 spin_lock_irqsave(&priv->eq_lock, irqflags);
2025 2031
2026 /* 2032 /* Handle completions if needed to make room. */
2027 * Handle completions if needed to make room. 2033 /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
2028 * HACK: Spin until there is sufficient room.
2029 */
2030 if (lepp_num_free_comp_slots(eq) == 0) { 2034 if (lepp_num_free_comp_slots(eq) == 0) {
2031 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); 2035 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
2032 if (nolds == 0) { 2036 if (nolds == 0) {
@@ -2040,6 +2044,7 @@ busy:
2040 cmd_tail = eq->cmd_tail; 2044 cmd_tail = eq->cmd_tail;
2041 2045
2042 /* Copy the commands, or fail. */ 2046 /* Copy the commands, or fail. */
2047 /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
2043 for (i = 0; i < num_frags; i++) { 2048 for (i = 0; i < num_frags; i++) {
2044 2049
2045 /* Prepare to advance, detecting full queue. */ 2050 /* Prepare to advance, detecting full queue. */
@@ -2261,6 +2266,23 @@ static int tile_net_get_mac(struct net_device *dev)
2261 return 0; 2266 return 0;
2262} 2267}
2263 2268
2269
2270#ifdef CONFIG_NET_POLL_CONTROLLER
2271/*
2272 * Polling 'interrupt' - used by things like netconsole to send skbs
2273 * without having to re-enable interrupts. It's not called while
2274 * the interrupt routine is executing.
2275 */
2276static void tile_net_netpoll(struct net_device *dev)
2277{
2278 struct tile_net_priv *priv = netdev_priv(dev);
2279 disable_percpu_irq(priv->intr_id);
2280 tile_net_handle_ingress_interrupt(priv->intr_id, dev);
2281 enable_percpu_irq(priv->intr_id, 0);
2282}
2283#endif
2284
2285
2264static const struct net_device_ops tile_net_ops = { 2286static const struct net_device_ops tile_net_ops = {
2265 .ndo_open = tile_net_open, 2287 .ndo_open = tile_net_open,
2266 .ndo_stop = tile_net_stop, 2288 .ndo_stop = tile_net_stop,
@@ -2269,7 +2291,10 @@ static const struct net_device_ops tile_net_ops = {
2269 .ndo_get_stats = tile_net_get_stats, 2291 .ndo_get_stats = tile_net_get_stats,
2270 .ndo_change_mtu = tile_net_change_mtu, 2292 .ndo_change_mtu = tile_net_change_mtu,
2271 .ndo_tx_timeout = tile_net_tx_timeout, 2293 .ndo_tx_timeout = tile_net_tx_timeout,
2272 .ndo_set_mac_address = tile_net_set_mac_address 2294 .ndo_set_mac_address = tile_net_set_mac_address,
2295#ifdef CONFIG_NET_POLL_CONTROLLER
2296 .ndo_poll_controller = tile_net_netpoll,
2297#endif
2273}; 2298};
2274 2299
2275 2300
@@ -2409,7 +2434,7 @@ static void tile_net_cleanup(void)
2409 */ 2434 */
2410static int tile_net_init_module(void) 2435static int tile_net_init_module(void)
2411{ 2436{
2412 pr_info("Tilera IPP Net Driver\n"); 2437 pr_info("Tilera Network Driver\n");
2413 2438
2414 tile_net_devs[0] = tile_net_dev_init("xgbe0"); 2439 tile_net_devs[0] = tile_net_dev_init("xgbe0");
2415 tile_net_devs[1] = tile_net_dev_init("xgbe1"); 2440 tile_net_devs[1] = tile_net_dev_init("xgbe1");
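The tilepro changes factor the receive-credit return into tile_net_return_credit() and add a netpoll hook that briefly disables the per-cpu interrupt around a direct call to the ingress handler. A standalone model of the batched credit counter (field names are illustrative, not the driver's structures):

/* Standalone model of the credit counter, not the driver: credits go back
 * in batches of "interval" packets instead of one at a time. */
#include <stdio.h>

struct credit_state {
	unsigned int remaining;
	unsigned int interval;
	unsigned int returned;	/* stands in for credits handed back to the IPP */
};

static void return_credit(struct credit_state *s)
{
	if (--s->remaining == 0) {
		s->remaining = s->interval;
		s->returned += s->interval;	/* batched return */
	}
}

int main(void)
{
	struct credit_state s = { .remaining = 4, .interval = 4, .returned = 0 };

	for (int pkt = 1; pkt <= 10; pkt++)
		return_credit(&s);

	printf("credits returned after 10 packets: %u\n", s.returned);
	return 0;
}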
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 39b8cf3dafcd..fcfa01f7ceb6 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -503,30 +503,32 @@ static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
503static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); 503static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
504static void rhine_restart_tx(struct net_device *dev); 504static void rhine_restart_tx(struct net_device *dev);
505 505
506static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool high) 506static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
507{ 507{
508 void __iomem *ioaddr = rp->base; 508 void __iomem *ioaddr = rp->base;
509 int i; 509 int i;
510 510
511 for (i = 0; i < 1024; i++) { 511 for (i = 0; i < 1024; i++) {
512 if (high ^ !!(ioread8(ioaddr + reg) & mask)) 512 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
513
514 if (low ^ has_mask_bits)
513 break; 515 break;
514 udelay(10); 516 udelay(10);
515 } 517 }
516 if (i > 64) { 518 if (i > 64) {
517 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle " 519 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
518 "count: %04d\n", high ? "high" : "low", reg, mask, i); 520 "count: %04d\n", low ? "low" : "high", reg, mask, i);
519 } 521 }
520} 522}
521 523
522static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask) 524static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
523{ 525{
524 rhine_wait_bit(rp, reg, mask, true); 526 rhine_wait_bit(rp, reg, mask, false);
525} 527}
526 528
527static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask) 529static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
528{ 530{
529 rhine_wait_bit(rp, reg, mask, false); 531 rhine_wait_bit(rp, reg, mask, true);
530} 532}
531 533
532static u32 rhine_get_events(struct rhine_private *rp) 534static u32 rhine_get_events(struct rhine_private *rp)
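The via-rhine helper's polarity flag is renamed from high to low and both wrappers swap the value they pass, because the old exit test was inverted relative to the wrappers' names: rhine_wait_bit_high() now stops polling when the masked bit reads 1, rhine_wait_bit_low() when it reads 0. A quick truth-table check of the new condition (standalone, not the driver):

/* Standalone check of the corrected polarity: the loop exits when
 * (low XOR bit_is_set) is true. */
#include <stdbool.h>
#include <stdio.h>

static bool should_stop(bool low, bool bit_is_set)
{
	return low ^ bit_is_set;
}

int main(void)
{
	printf("wait_high, bit=1 -> stop=%d\n", should_stop(false, true));
	printf("wait_high, bit=0 -> stop=%d\n", should_stop(false, false));
	printf("wait_low,  bit=0 -> stop=%d\n", should_stop(true, false));
	printf("wait_low,  bit=1 -> stop=%d\n", should_stop(true, true));
	return 0;
}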
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index cc83af083fd7..44b8d2bad8c3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -2,9 +2,7 @@
2 * Definitions for Xilinx Axi Ethernet device driver. 2 * Definitions for Xilinx Axi Ethernet device driver.
3 * 3 *
4 * Copyright (c) 2009 Secret Lab Technologies, Ltd. 4 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
5 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 5 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
6 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
7 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
8 */ 6 */
9 7
10#ifndef XILINX_AXIENET_H 8#ifndef XILINX_AXIENET_H
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 2fcbeba6814b..9c365e192a31 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -4,9 +4,9 @@
4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi 4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> 5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
7 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 7 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
8 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 8 * Copyright (c) 2010 - 2011 PetaLogix
9 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 9 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
10 * 10 *
11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
12 * and Spartan6. 12 * and Spartan6.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index d70b6e79f6c0..e90e1f46121e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -2,9 +2,9 @@
2 * MDIO bus driver for the Xilinx Axi Ethernet device 2 * MDIO bus driver for the Xilinx Axi Ethernet device
3 * 3 *
4 * Copyright (c) 2009 Secret Lab Technologies, Ltd. 4 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
5 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 5 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
6 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 6 * Copyright (c) 2010 - 2011 PetaLogix
7 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 7 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
8 */ 8 */
9 9
10#include <linux/of_address.h> 10#include <linux/of_address.h>