aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2014-07-16 17:09:34 -0400
committerDavid S. Miller <davem@davemloft.net>2014-07-16 17:09:34 -0400
commit1a98c69af1ecd97bfd1f4e4539924a9192434e36 (patch)
treea243defcf921ea174f8e43fce11d06830a6a9c36 /drivers/net
parent7a575f6b907ea5d207d2b5010293c189616eae34 (diff)
parentb6603fe574af289dbe9eb9fb4c540bca04f5a053 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c43
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c7
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h18
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c66
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h12
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c16
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/realtek/r8169.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c2
-rw-r--r--drivers/net/fddi/defxx.c17
-rw-r--r--drivers/net/phy/dp83640.c6
-rw-r--r--drivers/net/phy/mdio_bus.c44
-rw-r--r--drivers/net/ppp/ppp_generic.c8
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/usb/hso.c50
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c7
-rw-r--r--drivers/net/usb/smsc95xx.c14
-rw-r--r--drivers/net/wan/farsync.c112
-rw-r--r--drivers/net/xen-netfront.c27
38 files changed, 389 insertions, 275 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 041036e31aa3..1ff676caa9cd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4037,7 +4037,7 @@ static int bond_check_params(struct bond_params *params)
4037 } 4037 }
4038 4038
4039 if (ad_select) { 4039 if (ad_select) {
4040 bond_opt_initstr(&newval, lacp_rate); 4040 bond_opt_initstr(&newval, ad_select);
4041 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT), 4041 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
4042 &newval); 4042 &newval);
4043 if (!valptr) { 4043 if (!valptr) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 31b5340c685e..6f4e18644bd4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -710,13 +710,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
710 710
711 work_done = bcm_sysport_tx_reclaim(ring->priv, ring); 711 work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
712 712
713 if (work_done < budget) { 713 if (work_done == 0) {
714 napi_complete(napi); 714 napi_complete(napi);
715 /* re-enable TX interrupt */ 715 /* re-enable TX interrupt */
716 intrl2_1_mask_clear(ring->priv, BIT(ring->index)); 716 intrl2_1_mask_clear(ring->priv, BIT(ring->index));
717 } 717 }
718 718
719 return work_done; 719 return 0;
720} 720}
721 721
722static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) 722static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@@ -1339,28 +1339,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1339 usleep_range(1000, 2000); 1339 usleep_range(1000, 2000);
1340} 1340}
1341 1341
1342static inline int umac_reset(struct bcm_sysport_priv *priv) 1342static inline void umac_reset(struct bcm_sysport_priv *priv)
1343{ 1343{
1344 unsigned int timeout = 0;
1345 u32 reg; 1344 u32 reg;
1346 int ret = 0;
1347
1348 umac_writel(priv, 0, UMAC_CMD);
1349 while (timeout++ < 1000) {
1350 reg = umac_readl(priv, UMAC_CMD);
1351 if (!(reg & CMD_SW_RESET))
1352 break;
1353
1354 udelay(1);
1355 }
1356
1357 if (timeout == 1000) {
1358 dev_err(&priv->pdev->dev,
1359 "timeout waiting for MAC to come out of reset\n");
1360 ret = -ETIMEDOUT;
1361 }
1362 1345
1363 return ret; 1346 reg = umac_readl(priv, UMAC_CMD);
1347 reg |= CMD_SW_RESET;
1348 umac_writel(priv, reg, UMAC_CMD);
1349 udelay(10);
1350 reg = umac_readl(priv, UMAC_CMD);
1351 reg &= ~CMD_SW_RESET;
1352 umac_writel(priv, reg, UMAC_CMD);
1364} 1353}
1365 1354
1366static void umac_set_hw_addr(struct bcm_sysport_priv *priv, 1355static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
@@ -1412,11 +1401,7 @@ static int bcm_sysport_open(struct net_device *dev)
1412 int ret; 1401 int ret;
1413 1402
1414 /* Reset UniMAC */ 1403 /* Reset UniMAC */
1415 ret = umac_reset(priv); 1404 umac_reset(priv);
1416 if (ret) {
1417 netdev_err(dev, "UniMAC reset failed\n");
1418 return ret;
1419 }
1420 1405
1421 /* Flush TX and RX FIFOs at TOPCTRL level */ 1406 /* Flush TX and RX FIFOs at TOPCTRL level */
1422 topctrl_flush(priv); 1407 topctrl_flush(priv);
@@ -1699,12 +1684,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
1699 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); 1684 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
1700 dev->needed_headroom += sizeof(struct bcm_tsb); 1685 dev->needed_headroom += sizeof(struct bcm_tsb);
1701 1686
1702 /* We are interfaced to a switch which handles the multicast
1703 * filtering for us, so we do not support programming any
1704 * multicast hash table in this Ethernet MAC.
1705 */
1706 dev->flags &= ~IFF_MULTICAST;
1707
1708 /* libphy will adjust the link state accordingly */ 1687 /* libphy will adjust the link state accordingly */
1709 netif_carrier_off(dev); 1688 netif_carrier_off(dev);
1710 1689
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index cb15e3ac03c4..dca1236dd1cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -797,7 +797,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
797 797
798 return; 798 return;
799 } 799 }
800 bnx2x_frag_free(fp, new_data); 800 if (new_data)
801 bnx2x_frag_free(fp, new_data);
801drop: 802drop:
802 /* drop the packet and keep the buffer in the bin */ 803 /* drop the packet and keep the buffer in the bin */
803 DP(NETIF_MSG_RX_STATUS, 804 DP(NETIF_MSG_RX_STATUS,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6af9e3c046a0..3871ec49cc4d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12946,7 +12946,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
12946 * without the default SB. 12946 * without the default SB.
12947 * For VFs there is no default SB, then we return (index+1). 12947 * For VFs there is no default SB, then we return (index+1).
12948 */ 12948 */
12949 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control); 12949 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
12950 12950
12951 index = control & PCI_MSIX_FLAGS_QSIZE; 12951 index = control & PCI_MSIX_FLAGS_QSIZE;
12952 12952
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 5ba1cfbd60da..16281ad2da12 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1408,13 +1408,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
1408 if (cb->skb) 1408 if (cb->skb)
1409 continue; 1409 continue;
1410 1410
1411 /* set the DMA descriptor length once and for all
1412 * it will only change if we support dynamically sizing
1413 * priv->rx_buf_len, but we do not
1414 */
1415 dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
1416 priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
1417
1418 ret = bcmgenet_rx_refill(priv, cb); 1411 ret = bcmgenet_rx_refill(priv, cb);
1419 if (ret) 1412 if (ret)
1420 break; 1413 break;
@@ -2535,14 +2528,17 @@ static int bcmgenet_probe(struct platform_device *pdev)
2535 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); 2528 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
2536 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); 2529 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
2537 2530
2538 err = register_netdev(dev); 2531 /* libphy will determine the link state */
2539 if (err) 2532 netif_carrier_off(dev);
2540 goto err_clk_disable;
2541 2533
2542 /* Turn off the main clock, WOL clock is handled separately */ 2534 /* Turn off the main clock, WOL clock is handled separately */
2543 if (!IS_ERR(priv->clk)) 2535 if (!IS_ERR(priv->clk))
2544 clk_disable_unprepare(priv->clk); 2536 clk_disable_unprepare(priv->clk);
2545 2537
2538 err = register_netdev(dev);
2539 if (err)
2540 goto err;
2541
2546 return err; 2542 return err;
2547 2543
2548err_clk_disable: 2544err_clk_disable:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0f117105fed1..e23c993b1362 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -331,9 +331,9 @@ struct bcmgenet_mib_counters {
331#define EXT_ENERGY_DET_MASK (1 << 12) 331#define EXT_ENERGY_DET_MASK (1 << 12)
332 332
333#define EXT_RGMII_OOB_CTRL 0x0C 333#define EXT_RGMII_OOB_CTRL 0x0C
334#define RGMII_MODE_EN (1 << 0)
335#define RGMII_LINK (1 << 4) 334#define RGMII_LINK (1 << 4)
336#define OOB_DISABLE (1 << 5) 335#define OOB_DISABLE (1 << 5)
336#define RGMII_MODE_EN (1 << 6)
337#define ID_MODE_DIS (1 << 16) 337#define ID_MODE_DIS (1 << 16)
338 338
339#define EXT_GPHY_CTRL 0x1C 339#define EXT_GPHY_CTRL 0x1C
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6297e72b77e2..9bced68527a9 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2897,7 +2897,7 @@ static int be_open(struct net_device *netdev)
2897 for_all_evt_queues(adapter, eqo, i) { 2897 for_all_evt_queues(adapter, eqo, i) {
2898 napi_enable(&eqo->napi); 2898 napi_enable(&eqo->napi);
2899 be_enable_busy_poll(eqo); 2899 be_enable_busy_poll(eqo);
2900 be_eq_notify(adapter, eqo->q.id, true, false, 0); 2900 be_eq_notify(adapter, eqo->q.id, true, true, 0);
2901 } 2901 }
2902 adapter->flags |= BE_FLAGS_NAPI_ENABLED; 2902 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2903 2903
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index fab39e295441..36fc429298e3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2990 if (ug_info->rxExtendedFiltering) { 2990 if (ug_info->rxExtendedFiltering) {
2991 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 2991 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
2992 if (ug_info->largestexternallookupkeysize == 2992 if (ug_info->largestexternallookupkeysize ==
2993 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 2993 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
2994 size += 2994 size +=
2995 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 2995 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
2996 if (ug_info->largestexternallookupkeysize == 2996 if (ug_info->largestexternallookupkeysize ==
2997 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 2997 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
2998 size += 2998 size +=
2999 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 2999 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3000 } 3000 }
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 168a5ee5e0ba..72b454ce05ac 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1480,6 +1480,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
1480 s32 ret_val; 1480 s32 ret_val;
1481 u16 i, rar_count = mac->rar_entry_count; 1481 u16 i, rar_count = mac->rar_entry_count;
1482 1482
1483 if ((hw->mac.type >= e1000_i210) &&
1484 !(igb_get_flash_presence_i210(hw))) {
1485 ret_val = igb_pll_workaround_i210(hw);
1486 if (ret_val)
1487 return ret_val;
1488 }
1489
1483 /* Initialize identification LED */ 1490 /* Initialize identification LED */
1484 ret_val = igb_id_led_init(hw); 1491 ret_val = igb_id_led_init(hw);
1485 if (ret_val) { 1492 if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2a8bb35c2df2..217f8138851b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -46,14 +46,15 @@
46#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ 46#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
47 47
48/* Physical Func Reset Done Indication */ 48/* Physical Func Reset Done Indication */
49#define E1000_CTRL_EXT_PFRSTD 0x00004000 49#define E1000_CTRL_EXT_PFRSTD 0x00004000
50#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 50#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */
51#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 51#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
52#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 52#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
53#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 53#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
54#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 54#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
55#define E1000_CTRL_EXT_EIAME 0x01000000 55#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
56#define E1000_CTRL_EXT_IRCA 0x00000001 56#define E1000_CTRL_EXT_EIAME 0x01000000
57#define E1000_CTRL_EXT_IRCA 0x00000001
57/* Interrupt delay cancellation */ 58/* Interrupt delay cancellation */
58/* Driver loaded bit for FW */ 59/* Driver loaded bit for FW */
59#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 60#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
@@ -62,6 +63,7 @@
62/* packet buffer parity error detection enabled */ 63/* packet buffer parity error detection enabled */
63/* descriptor FIFO parity error detection enable */ 64/* descriptor FIFO parity error detection enable */
64#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ 65#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
66#define E1000_CTRL_EXT_PHYPDEN 0x00100000
65#define E1000_I2CCMD_REG_ADDR_SHIFT 16 67#define E1000_I2CCMD_REG_ADDR_SHIFT 16
66#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 68#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
67#define E1000_I2CCMD_OPCODE_READ 0x08000000 69#define E1000_I2CCMD_OPCODE_READ 0x08000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 89925e405849..ce55ea5d750c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
567/* These functions must be implemented by drivers */ 567/* These functions must be implemented by drivers */
568s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 568s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
569s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 569s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
570
571void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
572void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
570#endif /* _E1000_HW_H_ */ 573#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 337161f440dd..65d931669f81 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
834 } 834 }
835 return ret_val; 835 return ret_val;
836} 836}
837
838/**
839 * igb_pll_workaround_i210
840 * @hw: pointer to the HW structure
841 *
842 * Works around an errata in the PLL circuit where it occasionally
843 * provides the wrong clock frequency after power up.
844 **/
845s32 igb_pll_workaround_i210(struct e1000_hw *hw)
846{
847 s32 ret_val;
848 u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
849 u16 nvm_word, phy_word, pci_word, tmp_nvm;
850 int i;
851
852 /* Get and set needed register values */
853 wuc = rd32(E1000_WUC);
854 mdicnfg = rd32(E1000_MDICNFG);
855 reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
856 wr32(E1000_MDICNFG, reg_val);
857
858 /* Get data from NVM, or set default */
859 ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
860 &nvm_word);
861 if (ret_val)
862 nvm_word = E1000_INVM_DEFAULT_AL;
863 tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
864 for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
865 /* check current state directly from internal PHY */
866 igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
867 E1000_PHY_PLL_FREQ_REG), &phy_word);
868 if ((phy_word & E1000_PHY_PLL_UNCONF)
869 != E1000_PHY_PLL_UNCONF) {
870 ret_val = 0;
871 break;
872 } else {
873 ret_val = -E1000_ERR_PHY;
874 }
875 /* directly reset the internal PHY */
876 ctrl = rd32(E1000_CTRL);
877 wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
878
879 ctrl_ext = rd32(E1000_CTRL_EXT);
880 ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
881 wr32(E1000_CTRL_EXT, ctrl_ext);
882
883 wr32(E1000_WUC, 0);
884 reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
885 wr32(E1000_EEARBC_I210, reg_val);
886
887 igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
888 pci_word |= E1000_PCI_PMCSR_D3;
889 igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
890 usleep_range(1000, 2000);
891 pci_word &= ~E1000_PCI_PMCSR_D3;
892 igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
893 reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
894 wr32(E1000_EEARBC_I210, reg_val);
895
896 /* restore WUC register */
897 wr32(E1000_WUC, wuc);
898 }
899 /* restore MDICNFG setting */
900 wr32(E1000_MDICNFG, mdicnfg);
901 return ret_val;
902}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 9f34976687ba..3442b6357d01 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
33s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); 33s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
34s32 igb_init_nvm_params_i210(struct e1000_hw *hw); 34s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
35bool igb_get_flash_presence_i210(struct e1000_hw *hw); 35bool igb_get_flash_presence_i210(struct e1000_hw *hw);
36s32 igb_pll_workaround_i210(struct e1000_hw *hw);
36 37
37#define E1000_STM_OPCODE 0xDB00 38#define E1000_STM_OPCODE 0xDB00
38#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 39#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
78#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 79#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
79#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C 80#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
80 81
82/* PLL Defines */
83#define E1000_PCI_PMCSR 0x44
84#define E1000_PCI_PMCSR_D3 0x03
85#define E1000_MAX_PLL_TRIES 5
86#define E1000_PHY_PLL_UNCONF 0xFF
87#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
88#define E1000_PHY_PLL_FREQ_REG 0x000E
89#define E1000_INVM_DEFAULT_AL 0x202F
90#define E1000_INVM_AUTOLOAD 0x0A
91#define E1000_INVM_PLL_WO_VAL 0x0010
92
81#endif 93#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 1cc4b1a7e597..f5ba4e4eafb9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -66,6 +66,7 @@
66#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 66#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
67#define E1000_PBS 0x01008 /* Packet Buffer Size */ 67#define E1000_PBS 0x01008 /* Packet Buffer Size */
68#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ 68#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
69#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
69#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ 70#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
70#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ 71#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
71#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ 72#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5759a56aab00..4d2dc17fd31b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7217,6 +7217,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7217 } 7217 }
7218} 7218}
7219 7219
7220void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7221{
7222 struct igb_adapter *adapter = hw->back;
7223
7224 pci_read_config_word(adapter->pdev, reg, value);
7225}
7226
7227void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7228{
7229 struct igb_adapter *adapter = hw->back;
7230
7231 pci_write_config_word(adapter->pdev, reg, *value);
7232}
7233
7220s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 7234s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
7221{ 7235{
7222 struct igb_adapter *adapter = hw->back; 7236 struct igb_adapter *adapter = hw->back;
@@ -7580,6 +7594,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
7580 7594
7581 if (netif_running(netdev)) 7595 if (netif_running(netdev))
7582 igb_close(netdev); 7596 igb_close(netdev);
7597 else
7598 igb_reset(adapter);
7583 7599
7584 igb_clear_interrupt_scheme(adapter); 7600 igb_clear_interrupt_scheme(adapter);
7585 7601
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 45beca17fa50..dadd9a5f6323 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1207 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; 1207 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1208 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; 1208 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1209 1209
1210 if (l3_proto == swab16(ETH_P_IP)) 1210 if (l3_proto == htons(ETH_P_IP))
1211 command |= MVNETA_TXD_IP_CSUM; 1211 command |= MVNETA_TXD_IP_CSUM;
1212 else 1212 else
1213 command |= MVNETA_TX_L3_IP6; 1213 command |= MVNETA_TX_L3_IP6;
@@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
2529 2529
2530 if (phydev->speed == SPEED_1000) 2530 if (phydev->speed == SPEED_1000)
2531 val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 2531 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2532 else 2532 else if (phydev->speed == SPEED_100)
2533 val |= MVNETA_GMAC_CONFIG_MII_SPEED; 2533 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2534 2534
2535 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 2535 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 80f725228f5b..56022d647837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
294 init_completion(&cq->free); 294 init_completion(&cq->free);
295 295
296 cq->irq = priv->eq_table.eq[cq->vector].irq; 296 cq->irq = priv->eq_table.eq[cq->vector].irq;
297 cq->irq_affinity_change = false;
298
299 return 0; 297 return 0;
300 298
301err_radix: 299err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 4b2130760eed..14c00048bbec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -128,6 +128,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
128 mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", 128 mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
129 name); 129 name);
130 } 130 }
131
132 cq->irq_desc =
133 irq_to_desc(mlx4_eq_get_irq(mdev->dev,
134 cq->vector));
131 } 135 }
132 } else { 136 } else {
133 cq->vector = (cq->ring + 1 + priv->port) % 137 cq->vector = (cq->ring + 1 + priv->port) %
@@ -187,8 +191,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
187 mlx4_en_unmap_buffer(&cq->wqres.buf); 191 mlx4_en_unmap_buffer(&cq->wqres.buf);
188 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 192 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
189 if (priv->mdev->dev->caps.comp_pool && cq->vector) { 193 if (priv->mdev->dev->caps.comp_pool && cq->vector) {
190 if (!cq->is_tx)
191 irq_set_affinity_hint(cq->mcq.irq, NULL);
192 mlx4_release_eq(priv->mdev->dev, cq->vector); 194 mlx4_release_eq(priv->mdev->dev, cq->vector);
193 } 195 }
194 cq->vector = 0; 196 cq->vector = 0;
@@ -204,6 +206,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
204 if (!cq->is_tx) { 206 if (!cq->is_tx) {
205 napi_hash_del(&cq->napi); 207 napi_hash_del(&cq->napi);
206 synchronize_rcu(); 208 synchronize_rcu();
209 irq_set_affinity_hint(cq->mcq.irq, NULL);
207 } 210 }
208 netif_napi_del(&cq->napi); 211 netif_napi_del(&cq->napi);
209 212
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fa1a069e14e6..68d763d2d030 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
417 417
418 coal->tx_coalesce_usecs = priv->tx_usecs; 418 coal->tx_coalesce_usecs = priv->tx_usecs;
419 coal->tx_max_coalesced_frames = priv->tx_frames; 419 coal->tx_max_coalesced_frames = priv->tx_frames;
420 coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
421
420 coal->rx_coalesce_usecs = priv->rx_usecs; 422 coal->rx_coalesce_usecs = priv->rx_usecs;
421 coal->rx_max_coalesced_frames = priv->rx_frames; 423 coal->rx_max_coalesced_frames = priv->rx_frames;
422 424
@@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
426 coal->rx_coalesce_usecs_high = priv->rx_usecs_high; 428 coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
427 coal->rate_sample_interval = priv->sample_interval; 429 coal->rate_sample_interval = priv->sample_interval;
428 coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal; 430 coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
431
429 return 0; 432 return 0;
430} 433}
431 434
@@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
434{ 437{
435 struct mlx4_en_priv *priv = netdev_priv(dev); 438 struct mlx4_en_priv *priv = netdev_priv(dev);
436 439
440 if (!coal->tx_max_coalesced_frames_irq)
441 return -EINVAL;
442
437 priv->rx_frames = (coal->rx_max_coalesced_frames == 443 priv->rx_frames = (coal->rx_max_coalesced_frames ==
438 MLX4_EN_AUTO_CONF) ? 444 MLX4_EN_AUTO_CONF) ?
439 MLX4_EN_RX_COAL_TARGET : 445 MLX4_EN_RX_COAL_TARGET :
@@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
457 priv->rx_usecs_high = coal->rx_coalesce_usecs_high; 463 priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
458 priv->sample_interval = coal->rate_sample_interval; 464 priv->sample_interval = coal->rate_sample_interval;
459 priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; 465 priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
466 priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
460 467
461 return mlx4_en_moderation_update(priv); 468 return mlx4_en_moderation_update(priv);
462} 469}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f384b354c88d..887cf01d831d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2331,7 +2331,7 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
2331 struct mlx4_en_priv *priv = netdev_priv(dev); 2331 struct mlx4_en_priv *priv = netdev_priv(dev);
2332 __be16 current_port; 2332 __be16 current_port;
2333 2333
2334 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)) 2334 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2335 return; 2335 return;
2336 2336
2337 if (sa_family == AF_INET6) 2337 if (sa_family == AF_INET6)
@@ -2468,6 +2468,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2468 MLX4_WQE_CTRL_SOLICITED); 2468 MLX4_WQE_CTRL_SOLICITED);
2469 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 2469 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2470 priv->tx_ring_num = prof->tx_ring_num; 2470 priv->tx_ring_num = prof->tx_ring_num;
2471 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
2471 2472
2472 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, 2473 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
2473 GFP_KERNEL); 2474 GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b8ec9208e12a..7765a08f9e84 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -40,6 +40,7 @@
40#include <linux/if_ether.h> 40#include <linux/if_ether.h>
41#include <linux/if_vlan.h> 41#include <linux/if_vlan.h>
42#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
43#include <linux/irq.h>
43 44
44#include "mlx4_en.h" 45#include "mlx4_en.h"
45 46
@@ -782,6 +783,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
782 PKT_HASH_TYPE_L3); 783 PKT_HASH_TYPE_L3);
783 784
784 skb_record_rx_queue(gro_skb, cq->ring); 785 skb_record_rx_queue(gro_skb, cq->ring);
786 skb_mark_napi_id(gro_skb, &cq->napi);
785 787
786 if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { 788 if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
787 timestamp = mlx4_en_get_cqe_ts(cqe); 789 timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -896,16 +898,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
896 898
897 /* If we used up all the quota - we're probably not done yet... */ 899 /* If we used up all the quota - we're probably not done yet... */
898 if (done == budget) { 900 if (done == budget) {
901 int cpu_curr;
902 const struct cpumask *aff;
903
899 INC_PERF_COUNTER(priv->pstats.napi_quota); 904 INC_PERF_COUNTER(priv->pstats.napi_quota);
900 if (unlikely(cq->mcq.irq_affinity_change)) { 905
901 cq->mcq.irq_affinity_change = false; 906 cpu_curr = smp_processor_id();
907 aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
908
909 if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
910 /* Current cpu is not according to smp_irq_affinity -
911 * probably affinity changed. need to stop this NAPI
912 * poll, and restart it on the right CPU
913 */
902 napi_complete(napi); 914 napi_complete(napi);
903 mlx4_en_arm_cq(priv, cq); 915 mlx4_en_arm_cq(priv, cq);
904 return 0; 916 return 0;
905 } 917 }
906 } else { 918 } else {
907 /* Done for now */ 919 /* Done for now */
908 cq->mcq.irq_affinity_change = false;
909 napi_complete(napi); 920 napi_complete(napi);
910 mlx4_en_arm_cq(priv, cq); 921 mlx4_en_arm_cq(priv, cq);
911 } 922 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8be7483f8236..5045bab59633 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -351,9 +351,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
351 return cnt; 351 return cnt;
352} 352}
353 353
354static int mlx4_en_process_tx_cq(struct net_device *dev, 354static bool mlx4_en_process_tx_cq(struct net_device *dev,
355 struct mlx4_en_cq *cq, 355 struct mlx4_en_cq *cq)
356 int budget)
357{ 356{
358 struct mlx4_en_priv *priv = netdev_priv(dev); 357 struct mlx4_en_priv *priv = netdev_priv(dev);
359 struct mlx4_cq *mcq = &cq->mcq; 358 struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +371,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
372 int factor = priv->cqe_factor; 371 int factor = priv->cqe_factor;
373 u64 timestamp = 0; 372 u64 timestamp = 0;
374 int done = 0; 373 int done = 0;
374 int budget = priv->tx_work_limit;
375 375
376 if (!priv->port_up) 376 if (!priv->port_up)
377 return 0; 377 return true;
378 378
379 index = cons_index & size_mask; 379 index = cons_index & size_mask;
380 cqe = &buf[(index << factor) + factor]; 380 cqe = &buf[(index << factor) + factor];
@@ -447,7 +447,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
447 netif_tx_wake_queue(ring->tx_queue); 447 netif_tx_wake_queue(ring->tx_queue);
448 ring->wake_queue++; 448 ring->wake_queue++;
449 } 449 }
450 return done; 450 return done < budget;
451} 451}
452 452
453void mlx4_en_tx_irq(struct mlx4_cq *mcq) 453void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -467,24 +467,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
467 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); 467 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
468 struct net_device *dev = cq->dev; 468 struct net_device *dev = cq->dev;
469 struct mlx4_en_priv *priv = netdev_priv(dev); 469 struct mlx4_en_priv *priv = netdev_priv(dev);
470 int done; 470 int clean_complete;
471 471
472 done = mlx4_en_process_tx_cq(dev, cq, budget); 472 clean_complete = mlx4_en_process_tx_cq(dev, cq);
473 if (!clean_complete)
474 return budget;
473 475
474 /* If we used up all the quota - we're probably not done yet... */ 476 napi_complete(napi);
475 if (done < budget) { 477 mlx4_en_arm_cq(priv, cq);
476 /* Done for now */ 478
477 cq->mcq.irq_affinity_change = false; 479 return 0;
478 napi_complete(napi);
479 mlx4_en_arm_cq(priv, cq);
480 return done;
481 } else if (unlikely(cq->mcq.irq_affinity_change)) {
482 cq->mcq.irq_affinity_change = false;
483 napi_complete(napi);
484 mlx4_en_arm_cq(priv, cq);
485 return 0;
486 }
487 return budget;
488} 480}
489 481
490static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, 482static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d954ec1eac17..2a004b347e1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,11 +53,6 @@ enum {
53 MLX4_EQ_ENTRY_SIZE = 0x20 53 MLX4_EQ_ENTRY_SIZE = 0x20
54}; 54};
55 55
56struct mlx4_irq_notify {
57 void *arg;
58 struct irq_affinity_notify notify;
59};
60
61#define MLX4_EQ_STATUS_OK ( 0 << 28) 56#define MLX4_EQ_STATUS_OK ( 0 << 28)
62#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) 57#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
63#define MLX4_EQ_OWNER_SW ( 0 << 24) 58#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
1088 iounmap(priv->clr_base); 1083 iounmap(priv->clr_base);
1089} 1084}
1090 1085
1091static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
1092 const cpumask_t *mask)
1093{
1094 struct mlx4_irq_notify *n = container_of(notify,
1095 struct mlx4_irq_notify,
1096 notify);
1097 struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
1098 struct radix_tree_iter iter;
1099 void **slot;
1100
1101 radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
1102 struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
1103
1104 if (cq->irq == notify->irq)
1105 cq->irq_affinity_change = true;
1106 }
1107}
1108
1109static void mlx4_release_irq_notifier(struct kref *ref)
1110{
1111 struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
1112 notify.kref);
1113 kfree(n);
1114}
1115
1116static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
1117 struct mlx4_dev *dev, int irq)
1118{
1119 struct mlx4_irq_notify *irq_notifier = NULL;
1120 int err = 0;
1121
1122 irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
1123 if (!irq_notifier) {
1124 mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
1125 irq);
1126 return;
1127 }
1128
1129 irq_notifier->notify.irq = irq;
1130 irq_notifier->notify.notify = mlx4_irq_notifier_notify;
1131 irq_notifier->notify.release = mlx4_release_irq_notifier;
1132 irq_notifier->arg = priv;
1133 err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
1134 if (err) {
1135 kfree(irq_notifier);
1136 irq_notifier = NULL;
1137 mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
1138 }
1139}
1140
1141
1142int mlx4_alloc_eq_table(struct mlx4_dev *dev) 1086int mlx4_alloc_eq_table(struct mlx4_dev *dev)
1143{ 1087{
1144 struct mlx4_priv *priv = mlx4_priv(dev); 1088 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1409 continue; 1353 continue;
1410 /*we dont want to break here*/ 1354 /*we dont want to break here*/
1411 } 1355 }
1412 mlx4_assign_irq_notifier(priv, dev,
1413 priv->eq_table.eq[vec].irq);
1414 1356
1415 eq_set_ci(&priv->eq_table.eq[vec], 1); 1357 eq_set_ci(&priv->eq_table.eq[vec], 1);
1416 } 1358 }
@@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1427} 1369}
1428EXPORT_SYMBOL(mlx4_assign_eq); 1370EXPORT_SYMBOL(mlx4_assign_eq);
1429 1371
1372int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
1373{
1374 struct mlx4_priv *priv = mlx4_priv(dev);
1375
1376 return priv->eq_table.eq[vec].irq;
1377}
1378EXPORT_SYMBOL(mlx4_eq_get_irq);
1379
1430void mlx4_release_eq(struct mlx4_dev *dev, int vec) 1380void mlx4_release_eq(struct mlx4_dev *dev, int vec)
1431{ 1381{
1432 struct mlx4_priv *priv = mlx4_priv(dev); 1382 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
1438 Belonging to a legacy EQ*/ 1388 Belonging to a legacy EQ*/
1439 mutex_lock(&priv->msix_ctl.pool_lock); 1389 mutex_lock(&priv->msix_ctl.pool_lock);
1440 if (priv->msix_ctl.pool_bm & 1ULL << i) { 1390 if (priv->msix_ctl.pool_bm & 1ULL << i) {
1441 irq_set_affinity_notifier(
1442 priv->eq_table.eq[vec].irq,
1443 NULL);
1444 free_irq(priv->eq_table.eq[vec].irq, 1391 free_irq(priv->eq_table.eq[vec].irq,
1445 &priv->eq_table.eq[vec]); 1392 &priv->eq_table.eq[vec]);
1446 priv->msix_ctl.pool_bm &= ~(1ULL << i); 1393 priv->msix_ctl.pool_bm &= ~(1ULL << i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 7c1b5ec5378f..2b19dd1f2c5d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -126,6 +126,8 @@ enum {
126#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \ 126#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
127 MLX4_EN_NUM_UP) 127 MLX4_EN_NUM_UP)
128 128
129#define MLX4_EN_DEFAULT_TX_WORK 256
130
129/* Target number of packets to coalesce with interrupt moderation */ 131/* Target number of packets to coalesce with interrupt moderation */
130#define MLX4_EN_RX_COAL_TARGET 44 132#define MLX4_EN_RX_COAL_TARGET 44
131#define MLX4_EN_RX_COAL_TIME 0x10 133#define MLX4_EN_RX_COAL_TIME 0x10
@@ -341,6 +343,7 @@ struct mlx4_en_cq {
341#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) 343#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
342 spinlock_t poll_lock; /* protects from LLS/napi conflicts */ 344 spinlock_t poll_lock; /* protects from LLS/napi conflicts */
343#endif /* CONFIG_NET_RX_BUSY_POLL */ 345#endif /* CONFIG_NET_RX_BUSY_POLL */
346 struct irq_desc *irq_desc;
344}; 347};
345 348
346struct mlx4_en_port_profile { 349struct mlx4_en_port_profile {
@@ -540,6 +543,7 @@ struct mlx4_en_priv {
540 __be32 ctrl_flags; 543 __be32 ctrl_flags;
541 u32 flags; 544 u32 flags;
542 u8 num_tx_rings_p_up; 545 u8 num_tx_rings_p_up;
546 u32 tx_work_limit;
543 u32 tx_ring_num; 547 u32 tx_ring_num;
544 u32 rx_ring_num; 548 u32 rx_ring_num;
545 u32 rx_skb_size; 549 u32 rx_skb_size;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 51c78ce27b37..6175bd59190a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -540,6 +540,7 @@ enum rtl_register_content {
540 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ 540 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
541 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ 541 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
542 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ 542 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
543 Rdy_to_L23 = (1 << 1), /* L23 Enable */
543 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ 544 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
544 545
545 /* Config4 register */ 546 /* Config4 register */
@@ -4883,6 +4884,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
4883 PCI_EXP_LNKCTL_CLKREQ_EN); 4884 PCI_EXP_LNKCTL_CLKREQ_EN);
4884} 4885}
4885 4886
4887static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
4888{
4889 void __iomem *ioaddr = tp->mmio_addr;
4890 u8 data;
4891
4892 data = RTL_R8(Config3);
4893
4894 if (enable)
4895 data |= Rdy_to_L23;
4896 else
4897 data &= ~Rdy_to_L23;
4898
4899 RTL_W8(Config3, data);
4900}
4901
4886#define R8168_CPCMD_QUIRK_MASK (\ 4902#define R8168_CPCMD_QUIRK_MASK (\
4887 EnableBist | \ 4903 EnableBist | \
4888 Mac_dbgo_oe | \ 4904 Mac_dbgo_oe | \
@@ -5232,6 +5248,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
5232 }; 5248 };
5233 5249
5234 rtl_hw_start_8168f(tp); 5250 rtl_hw_start_8168f(tp);
5251 rtl_pcie_state_l2l3_enable(tp, false);
5235 5252
5236 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 5253 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5237 5254
@@ -5270,6 +5287,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5270 5287
5271 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); 5288 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
5272 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); 5289 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
5290
5291 rtl_pcie_state_l2l3_enable(tp, false);
5273} 5292}
5274 5293
5275static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) 5294static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5522,6 +5541,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5522 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5541 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5523 5542
5524 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); 5543 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
5544
5545 rtl_pcie_state_l2l3_enable(tp, false);
5525} 5546}
5526 5547
5527static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) 5548static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5557,6 +5578,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5557 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5578 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5558 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5579 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5559 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); 5580 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
5581
5582 rtl_pcie_state_l2l3_enable(tp, false);
5560} 5583}
5561 5584
5562static void rtl_hw_start_8106(struct rtl8169_private *tp) 5585static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5569,6 +5592,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
5569 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); 5592 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
5570 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 5593 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5571 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); 5594 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
5595
5596 rtl_pcie_state_l2l3_enable(tp, false);
5572} 5597}
5573 5598
5574static void rtl_hw_start_8101(struct net_device *dev) 5599static void rtl_hw_start_8101(struct net_device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b3e148ef5683..9d3748361a1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -320,11 +320,8 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
320 320
321static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart) 321static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
322{ 322{
323 u32 value;
324
325 value = readl(ioaddr + GMAC_AN_CTRL);
326 /* auto negotiation enable and External Loopback enable */ 323 /* auto negotiation enable and External Loopback enable */
327 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; 324 u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
328 325
329 if (restart) 326 if (restart)
330 value |= GMAC_AN_CTRL_RAN; 327 value |= GMAC_AN_CTRL_RAN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7e6628a91514..1e2bcf5f89e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
145 x->rx_msg_type_delay_req++; 145 x->rx_msg_type_delay_req++;
146 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP) 146 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
147 x->rx_msg_type_delay_resp++; 147 x->rx_msg_type_delay_resp++;
148 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ) 148 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
149 x->rx_msg_type_pdelay_req++; 149 x->rx_msg_type_pdelay_req++;
150 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP) 150 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
151 x->rx_msg_type_pdelay_resp++; 151 x->rx_msg_type_pdelay_resp++;
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 6ea59ece7e0b..6eb849a56da5 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -292,7 +292,11 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
292 292
293static int dfx_rcv_init(DFX_board_t *bp, int get_buffers); 293static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
294static void dfx_rcv_queue_process(DFX_board_t *bp); 294static void dfx_rcv_queue_process(DFX_board_t *bp);
295#ifdef DYNAMIC_BUFFERS
295static void dfx_rcv_flush(DFX_board_t *bp); 296static void dfx_rcv_flush(DFX_board_t *bp);
297#else
298static inline void dfx_rcv_flush(DFX_board_t *bp) {}
299#endif
296 300
297static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, 301static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
298 struct net_device *dev); 302 struct net_device *dev);
@@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2849 * Align an sk_buff to a boundary power of 2 2853 * Align an sk_buff to a boundary power of 2
2850 * 2854 *
2851 */ 2855 */
2852 2856#ifdef DYNAMIC_BUFFERS
2853static void my_skb_align(struct sk_buff *skb, int n) 2857static void my_skb_align(struct sk_buff *skb, int n)
2854{ 2858{
2855 unsigned long x = (unsigned long)skb->data; 2859 unsigned long x = (unsigned long)skb->data;
@@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n)
2859 2863
2860 skb_reserve(skb, v - x); 2864 skb_reserve(skb, v - x);
2861} 2865}
2862 2866#endif
2863 2867
2864/* 2868/*
2865 * ================ 2869 * ================
@@ -3108,10 +3112,7 @@ static void dfx_rcv_queue_process(
3108 break; 3112 break;
3109 } 3113 }
3110 else { 3114 else {
3111#ifndef DYNAMIC_BUFFERS 3115 if (!rx_in_place) {
3112 if (! rx_in_place)
3113#endif
3114 {
3115 /* Receive buffer allocated, pass receive packet up */ 3116 /* Receive buffer allocated, pass receive packet up */
3116 dma_sync_single_for_cpu( 3117 dma_sync_single_for_cpu(
3117 bp->bus_dev, 3118 bp->bus_dev,
@@ -3505,10 +3506,6 @@ static void dfx_rcv_flush( DFX_board_t *bp )
3505 } 3506 }
3506 3507
3507 } 3508 }
3508#else
3509static inline void dfx_rcv_flush( DFX_board_t *bp )
3510{
3511}
3512#endif /* DYNAMIC_BUFFERS */ 3509#endif /* DYNAMIC_BUFFERS */
3513 3510
3514/* 3511/*
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 76fbd3948736..255c21ff274c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1341,15 +1341,15 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
1341{ 1341{
1342 struct dp83640_private *dp83640 = phydev->priv; 1342 struct dp83640_private *dp83640 = phydev->priv;
1343 1343
1344 if (!dp83640->hwts_rx_en)
1345 return false;
1346
1347 if (is_status_frame(skb, type)) { 1344 if (is_status_frame(skb, type)) {
1348 decode_status_frame(dp83640, skb); 1345 decode_status_frame(dp83640, skb);
1349 kfree_skb(skb); 1346 kfree_skb(skb);
1350 return true; 1347 return true;
1351 } 1348 }
1352 1349
1350 if (!dp83640->hwts_rx_en)
1351 return false;
1352
1353 SKB_PTP_TYPE(skb) = type; 1353 SKB_PTP_TYPE(skb) = type;
1354 skb_queue_tail(&dp83640->rx_queue, skb); 1354 skb_queue_tail(&dp83640->rx_queue, skb);
1355 schedule_work(&dp83640->ts_work); 1355 schedule_work(&dp83640->ts_work);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e58aa54484c..4eaadcfcb0fe 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
187 return d ? to_mii_bus(d) : NULL; 187 return d ? to_mii_bus(d) : NULL;
188} 188}
189EXPORT_SYMBOL(of_mdio_find_bus); 189EXPORT_SYMBOL(of_mdio_find_bus);
190
191/* Walk the list of subnodes of a mdio bus and look for a node that matches the
192 * phy's address with its 'reg' property. If found, set the of_node pointer for
193 * the phy. This allows auto-probed pyh devices to be supplied with information
194 * passed in via DT.
195 */
196static void of_mdiobus_link_phydev(struct mii_bus *mdio,
197 struct phy_device *phydev)
198{
199 struct device *dev = &phydev->dev;
200 struct device_node *child;
201
202 if (dev->of_node || !mdio->dev.of_node)
203 return;
204
205 for_each_available_child_of_node(mdio->dev.of_node, child) {
206 int addr;
207 int ret;
208
209 ret = of_property_read_u32(child, "reg", &addr);
210 if (ret < 0) {
211 dev_err(dev, "%s has invalid PHY address\n",
212 child->full_name);
213 continue;
214 }
215
216 /* A PHY must have a reg property in the range [0-31] */
217 if (addr >= PHY_MAX_ADDR) {
218 dev_err(dev, "%s PHY address %i is too large\n",
219 child->full_name, addr);
220 continue;
221 }
222
223 if (addr == phydev->addr) {
224 dev->of_node = child;
225 return;
226 }
227 }
228}
229#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
230static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
231 struct phy_device *phydev)
232{
233}
190#endif 234#endif
191 235
192/** 236/**
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index c38ee903bd59..3ed16a89b5d8 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
539{ 539{
540 struct sock_fprog uprog; 540 struct sock_fprog uprog;
541 struct sock_filter *code = NULL; 541 struct sock_filter *code = NULL;
542 int len, err; 542 int len;
543 543
544 if (copy_from_user(&uprog, arg, sizeof(uprog))) 544 if (copy_from_user(&uprog, arg, sizeof(uprog)))
545 return -EFAULT; 545 return -EFAULT;
@@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
554 if (IS_ERR(code)) 554 if (IS_ERR(code))
555 return PTR_ERR(code); 555 return PTR_ERR(code);
556 556
557 err = sk_chk_filter(code, uprog.len);
558 if (err) {
559 kfree(code);
560 return err;
561 }
562
563 *p = code; 557 *p = code;
564 return uprog.len; 558 return uprog.len;
565} 559}
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ea7efd11857..6c9c16d76935 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
675 po->chan.hdrlen = (sizeof(struct pppoe_hdr) + 675 po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
676 dev->hard_header_len); 676 dev->hard_header_len);
677 677
678 po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr); 678 po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
679 po->chan.private = sk; 679 po->chan.private = sk;
680 po->chan.ops = &pppoe_chan_ops; 680 po->chan.ops = &pppoe_chan_ops;
681 681
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 50b36b299946..a36401802cec 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -258,10 +258,8 @@ struct hso_serial {
258 * so as not to drop characters on the floor. 258 * so as not to drop characters on the floor.
259 */ 259 */
260 int curr_rx_urb_idx; 260 int curr_rx_urb_idx;
261 u16 curr_rx_urb_offset;
262 u8 rx_urb_filled[MAX_RX_URBS]; 261 u8 rx_urb_filled[MAX_RX_URBS];
263 struct tasklet_struct unthrottle_tasklet; 262 struct tasklet_struct unthrottle_tasklet;
264 struct work_struct retry_unthrottle_workqueue;
265}; 263};
266 264
267struct hso_device { 265struct hso_device {
@@ -1252,14 +1250,6 @@ static void hso_unthrottle(struct tty_struct *tty)
1252 tasklet_hi_schedule(&serial->unthrottle_tasklet); 1250 tasklet_hi_schedule(&serial->unthrottle_tasklet);
1253} 1251}
1254 1252
1255static void hso_unthrottle_workfunc(struct work_struct *work)
1256{
1257 struct hso_serial *serial =
1258 container_of(work, struct hso_serial,
1259 retry_unthrottle_workqueue);
1260 hso_unthrottle_tasklet(serial);
1261}
1262
1263/* open the requested serial port */ 1253/* open the requested serial port */
1264static int hso_serial_open(struct tty_struct *tty, struct file *filp) 1254static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1265{ 1255{
@@ -1295,8 +1285,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1295 tasklet_init(&serial->unthrottle_tasklet, 1285 tasklet_init(&serial->unthrottle_tasklet,
1296 (void (*)(unsigned long))hso_unthrottle_tasklet, 1286 (void (*)(unsigned long))hso_unthrottle_tasklet,
1297 (unsigned long)serial); 1287 (unsigned long)serial);
1298 INIT_WORK(&serial->retry_unthrottle_workqueue,
1299 hso_unthrottle_workfunc);
1300 result = hso_start_serial_device(serial->parent, GFP_KERNEL); 1288 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
1301 if (result) { 1289 if (result) {
1302 hso_stop_serial_device(serial->parent); 1290 hso_stop_serial_device(serial->parent);
@@ -1345,7 +1333,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
1345 if (!usb_gone) 1333 if (!usb_gone)
1346 hso_stop_serial_device(serial->parent); 1334 hso_stop_serial_device(serial->parent);
1347 tasklet_kill(&serial->unthrottle_tasklet); 1335 tasklet_kill(&serial->unthrottle_tasklet);
1348 cancel_work_sync(&serial->retry_unthrottle_workqueue);
1349 } 1336 }
1350 1337
1351 if (!usb_gone) 1338 if (!usb_gone)
@@ -2013,8 +2000,7 @@ static void ctrl_callback(struct urb *urb)
2013static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) 2000static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
2014{ 2001{
2015 struct tty_struct *tty; 2002 struct tty_struct *tty;
2016 int write_length_remaining = 0; 2003 int count;
2017 int curr_write_len;
2018 2004
2019 /* Sanity check */ 2005 /* Sanity check */
2020 if (urb == NULL || serial == NULL) { 2006 if (urb == NULL || serial == NULL) {
@@ -2024,29 +2010,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
2024 2010
2025 tty = tty_port_tty_get(&serial->port); 2011 tty = tty_port_tty_get(&serial->port);
2026 2012
2013 if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
2014 tty_kref_put(tty);
2015 return -1;
2016 }
2017
2027 /* Push data to tty */ 2018 /* Push data to tty */
2028 write_length_remaining = urb->actual_length -
2029 serial->curr_rx_urb_offset;
2030 D1("data to push to tty"); 2019 D1("data to push to tty");
2031 while (write_length_remaining) { 2020 count = tty_buffer_request_room(&serial->port, urb->actual_length);
2032 if (tty && test_bit(TTY_THROTTLED, &tty->flags)) { 2021 if (count >= urb->actual_length) {
2033 tty_kref_put(tty); 2022 tty_insert_flip_string(&serial->port, urb->transfer_buffer,
2034 return -1; 2023 urb->actual_length);
2035 }
2036 curr_write_len = tty_insert_flip_string(&serial->port,
2037 urb->transfer_buffer + serial->curr_rx_urb_offset,
2038 write_length_remaining);
2039 serial->curr_rx_urb_offset += curr_write_len;
2040 write_length_remaining -= curr_write_len;
2041 tty_flip_buffer_push(&serial->port); 2024 tty_flip_buffer_push(&serial->port);
2025 } else {
2026 dev_warn(&serial->parent->usb->dev,
2027 "dropping data, %d bytes lost\n", urb->actual_length);
2042 } 2028 }
2029
2043 tty_kref_put(tty); 2030 tty_kref_put(tty);
2044 2031
2045 if (write_length_remaining == 0) { 2032 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
2046 serial->curr_rx_urb_offset = 0; 2033
2047 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; 2034 return 0;
2048 }
2049 return write_length_remaining;
2050} 2035}
2051 2036
2052 2037
@@ -2217,7 +2202,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
2217 } 2202 }
2218 } 2203 }
2219 serial->curr_rx_urb_idx = 0; 2204 serial->curr_rx_urb_idx = 0;
2220 serial->curr_rx_urb_offset = 0;
2221 2205
2222 if (serial->tx_urb) 2206 if (serial->tx_urb)
2223 usb_kill_urb(serial->tx_urb); 2207 usb_kill_urb(serial->tx_urb);
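
The rewritten put_rxbuf_data() above drops the partial-write bookkeeping (curr_rx_urb_offset, the retry workqueue) in favour of an all-or-nothing push into the tty buffer. A minimal sketch of that pattern, assuming a filled receive buffer and a struct tty_port; demo_push_rx() and its arguments are hypothetical:

/* Sketch of the all-or-nothing RX push used by the new put_rxbuf_data():
 * reserve room for the whole buffer first, then either insert it all or
 * drop it, so no per-URB offset has to be carried across calls.
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/device.h>

static int demo_push_rx(struct tty_port *port, struct device *dev,
			const unsigned char *buf, unsigned int len)
{
	struct tty_struct *tty = tty_port_tty_get(port);

	/* Respect throttling: caller keeps the URB marked "filled" and retries. */
	if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
		tty_kref_put(tty);
		return -1;
	}

	if (tty_buffer_request_room(port, len) >= len) {
		tty_insert_flip_string(port, buf, len);
		tty_flip_buffer_push(port);
	} else {
		dev_warn(dev, "dropping data, %u bytes lost\n", len);
	}

	tty_kref_put(tty);
	return 0;
}
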
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cf62d7e8329f..c4638c67f6b9 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -741,6 +741,7 @@ static const struct usb_device_id products[] = {
741 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, 741 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
742 {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, 742 {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
743 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ 743 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
744 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
744 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 745 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
745 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 746 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
746 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 747 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7d2dd80c4a7a..e1e430587868 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1367,7 +1367,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
1367 struct sk_buff_head seg_list; 1367 struct sk_buff_head seg_list;
1368 struct sk_buff *segs, *nskb; 1368 struct sk_buff *segs, *nskb;
1369 1369
1370 features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO); 1370 features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1371 segs = skb_gso_segment(skb, features); 1371 segs = skb_gso_segment(skb, features);
1372 if (IS_ERR(segs) || !segs) 1372 if (IS_ERR(segs) || !segs)
1373 goto drop; 1373 goto drop;
@@ -3213,8 +3213,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
3213 struct r8152 *tp = netdev_priv(dev); 3213 struct r8152 *tp = netdev_priv(dev);
3214 struct tally_counter tally; 3214 struct tally_counter tally;
3215 3215
3216 if (usb_autopm_get_interface(tp->intf) < 0)
3217 return;
3218
3216 generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA); 3219 generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
3217 3220
3221 usb_autopm_put_interface(tp->intf);
3222
3218 data[0] = le64_to_cpu(tally.tx_packets); 3223 data[0] = le64_to_cpu(tally.tx_packets);
3219 data[1] = le64_to_cpu(tally.rx_packets); 3224 data[1] = le64_to_cpu(tally.rx_packets);
3220 data[2] = le64_to_cpu(tally.tx_errors); 3225 data[2] = le64_to_cpu(tally.tx_errors);
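
The second r8152 hunk brackets the tally-counter read with a runtime-PM reference so the control transfer never hits an autosuspended device. A minimal sketch of that bracket; demo_read_counters() is a hypothetical stand-in for the real generic_ocp_read() call:

/* Sketch of the runtime-PM bracket added to rtl8152_get_ethtool_stats():
 * wake the interface before issuing USB traffic, release the reference
 * afterwards so autosuspend can kick back in.
 */
#include <linux/usb.h>

static int demo_read_counters(struct usb_interface *intf,
			      void *buf, size_t len)
{
	int ret;

	ret = usb_autopm_get_interface(intf);	/* resume if autosuspended */
	if (ret < 0)
		return ret;

	/* ... issue the control/bulk transfer that fills buf here ... */

	usb_autopm_put_interface(intf);		/* allow autosuspend again */
	return 0;
}
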
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 424db65e4396..d07bf4cb893f 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf)
1714 return ret; 1714 return ret;
1715} 1715}
1716 1716
1717static int smsc95xx_reset_resume(struct usb_interface *intf)
1718{
1719 struct usbnet *dev = usb_get_intfdata(intf);
1720 int ret;
1721
1722 ret = smsc95xx_reset(dev);
1723 if (ret < 0)
1724 return ret;
1725
1726 return smsc95xx_resume(intf);
1727}
1728
1717static void smsc95xx_rx_csum_offload(struct sk_buff *skb) 1729static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
1718{ 1730{
1719 skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2); 1731 skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = {
2004 .probe = usbnet_probe, 2016 .probe = usbnet_probe,
2005 .suspend = smsc95xx_suspend, 2017 .suspend = smsc95xx_suspend,
2006 .resume = smsc95xx_resume, 2018 .resume = smsc95xx_resume,
2007 .reset_resume = smsc95xx_resume, 2019 .reset_resume = smsc95xx_reset_resume,
2008 .disconnect = usbnet_disconnect, 2020 .disconnect = usbnet_disconnect,
2009 .disable_hub_initiated_lpm = 1, 2021 .disable_hub_initiated_lpm = 1,
2010 .supports_autosuspend = 1, 2022 .supports_autosuspend = 1,
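
The smsc95xx hunks give the driver a real .reset_resume: after a USB reset the chip's register state is gone, so it is reprogrammed before the ordinary resume path runs. A minimal sketch under that assumption; the demo_* names are hypothetical stand-ins for smsc95xx_reset()/smsc95xx_resume():

/* Sketch of the reset_resume pattern from the smsc95xx hunk. */
#include <linux/usb.h>
#include <linux/usb/usbnet.h>

static int demo_hw_reset(struct usbnet *dev)
{
	/* Stand-in for smsc95xx_reset(): reprogram MAC/PHY registers here. */
	return 0;
}

static int demo_reset_resume(struct usb_interface *intf)
{
	struct usbnet *dev = usb_get_intfdata(intf);
	int ret;

	ret = demo_hw_reset(dev);	/* register state was lost across the reset */
	if (ret < 0)
		return ret;

	return usbnet_resume(intf);	/* then the ordinary resume path */
}

/* Wiring, mirroring the .reset_resume change in smsc95xx_driver
 * (abbreviated; a real usb_driver also needs .probe, .id_table, ...).
 */
static struct usb_driver demo_driver = {
	.name			= "demo_usbnet",
	.resume			= usbnet_resume,
	.reset_resume		= demo_reset_resume,
	.supports_autosuspend	= 1,
};
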
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 93ace042d0aa..1f041271f7fe 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2363,7 +2363,7 @@ static char *type_strings[] = {
2363 "FarSync TE1" 2363 "FarSync TE1"
2364}; 2364};
2365 2365
2366static void 2366static int
2367fst_init_card(struct fst_card_info *card) 2367fst_init_card(struct fst_card_info *card)
2368{ 2368{
2369 int i; 2369 int i;
@@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card)
2374 * we'll have to revise it in some way then. 2374 * we'll have to revise it in some way then.
2375 */ 2375 */
2376 for (i = 0; i < card->nports; i++) { 2376 for (i = 0; i < card->nports; i++) {
2377 err = register_hdlc_device(card->ports[i].dev); 2377 err = register_hdlc_device(card->ports[i].dev);
2378 if (err < 0) { 2378 if (err < 0) {
2379 int j;
2380 pr_err("Cannot register HDLC device for port %d (errno %d)\n", 2379 pr_err("Cannot register HDLC device for port %d (errno %d)\n",
2381 i, -err); 2380 i, -err);
2382 for (j = i; j < card->nports; j++) { 2381 while (i--)
2383 free_netdev(card->ports[j].dev); 2382 unregister_hdlc_device(card->ports[i].dev);
2384 card->ports[j].dev = NULL; 2383 return err;
2385 } 2384 }
2386 card->nports = i;
2387 break;
2388 }
2389 } 2385 }
2390 2386
2391 pr_info("%s-%s: %s IRQ%d, %d ports\n", 2387 pr_info("%s-%s: %s IRQ%d, %d ports\n",
2392 port_to_dev(&card->ports[0])->name, 2388 port_to_dev(&card->ports[0])->name,
2393 port_to_dev(&card->ports[card->nports - 1])->name, 2389 port_to_dev(&card->ports[card->nports - 1])->name,
2394 type_strings[card->type], card->irq, card->nports); 2390 type_strings[card->type], card->irq, card->nports);
2391 return 0;
2395} 2392}
2396 2393
2397static const struct net_device_ops fst_ops = { 2394static const struct net_device_ops fst_ops = {
@@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2447 /* Try to enable the device */ 2444 /* Try to enable the device */
2448 if ((err = pci_enable_device(pdev)) != 0) { 2445 if ((err = pci_enable_device(pdev)) != 0) {
2449 pr_err("Failed to enable card. Err %d\n", -err); 2446 pr_err("Failed to enable card. Err %d\n", -err);
2450 kfree(card); 2447 goto enable_fail;
2451 return err;
2452 } 2448 }
2453 2449
2454 if ((err = pci_request_regions(pdev, "FarSync")) !=0) { 2450 if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
2455 pr_err("Failed to allocate regions. Err %d\n", -err); 2451 pr_err("Failed to allocate regions. Err %d\n", -err);
2456 pci_disable_device(pdev); 2452 goto regions_fail;
2457 kfree(card);
2458 return err;
2459 } 2453 }
2460 2454
2461 /* Get virtual addresses of memory regions */ 2455 /* Get virtual addresses of memory regions */
@@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2464 card->phys_ctlmem = pci_resource_start(pdev, 3); 2458 card->phys_ctlmem = pci_resource_start(pdev, 3);
2465 if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) { 2459 if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
2466 pr_err("Physical memory remap failed\n"); 2460 pr_err("Physical memory remap failed\n");
2467 pci_release_regions(pdev); 2461 err = -ENODEV;
2468 pci_disable_device(pdev); 2462 goto ioremap_physmem_fail;
2469 kfree(card);
2470 return -ENODEV;
2471 } 2463 }
2472 if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) { 2464 if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
2473 pr_err("Control memory remap failed\n"); 2465 pr_err("Control memory remap failed\n");
2474 pci_release_regions(pdev); 2466 err = -ENODEV;
2475 pci_disable_device(pdev); 2467 goto ioremap_ctlmem_fail;
2476 iounmap(card->mem);
2477 kfree(card);
2478 return -ENODEV;
2479 } 2468 }
2480 dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem); 2469 dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
2481 2470
2482 /* Register the interrupt handler */ 2471 /* Register the interrupt handler */
2483 if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) { 2472 if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
2484 pr_err("Unable to register interrupt %d\n", card->irq); 2473 pr_err("Unable to register interrupt %d\n", card->irq);
2485 pci_release_regions(pdev); 2474 err = -ENODEV;
2486 pci_disable_device(pdev); 2475 goto irq_fail;
2487 iounmap(card->ctlmem);
2488 iounmap(card->mem);
2489 kfree(card);
2490 return -ENODEV;
2491 } 2476 }
2492 2477
2493 /* Record info we need */ 2478 /* Record info we need */
@@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2513 while (i--) 2498 while (i--)
2514 free_netdev(card->ports[i].dev); 2499 free_netdev(card->ports[i].dev);
2515 pr_err("FarSync: out of memory\n"); 2500 pr_err("FarSync: out of memory\n");
2516 free_irq(card->irq, card); 2501 err = -ENOMEM;
2517 pci_release_regions(pdev); 2502 goto hdlcdev_fail;
2518 pci_disable_device(pdev);
2519 iounmap(card->ctlmem);
2520 iounmap(card->mem);
2521 kfree(card);
2522 return -ENODEV;
2523 } 2503 }
2524 card->ports[i].dev = dev; 2504 card->ports[i].dev = dev;
2525 card->ports[i].card = card; 2505 card->ports[i].card = card;
@@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2565 pci_set_drvdata(pdev, card); 2545 pci_set_drvdata(pdev, card);
2566 2546
2567 /* Remainder of card setup */ 2547 /* Remainder of card setup */
2548 if (no_of_cards_added >= FST_MAX_CARDS) {
2549 pr_err("FarSync: too many cards\n");
2550 err = -ENOMEM;
2551 goto card_array_fail;
2552 }
2568 fst_card_array[no_of_cards_added] = card; 2553 fst_card_array[no_of_cards_added] = card;
2569 card->card_no = no_of_cards_added++; /* Record instance and bump it */ 2554 card->card_no = no_of_cards_added++; /* Record instance and bump it */
2570 fst_init_card(card); 2555 err = fst_init_card(card);
2556 if (err)
2557 goto init_card_fail;
2571 if (card->family == FST_FAMILY_TXU) { 2558 if (card->family == FST_FAMILY_TXU) {
2572 /* 2559 /*
2573 * Allocate a dma buffer for transmit and receives 2560 * Allocate a dma buffer for transmit and receives
@@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2577 &card->rx_dma_handle_card); 2564 &card->rx_dma_handle_card);
2578 if (card->rx_dma_handle_host == NULL) { 2565 if (card->rx_dma_handle_host == NULL) {
2579 pr_err("Could not allocate rx dma buffer\n"); 2566 pr_err("Could not allocate rx dma buffer\n");
2580 fst_disable_intr(card); 2567 err = -ENOMEM;
2581 pci_release_regions(pdev); 2568 goto rx_dma_fail;
2582 pci_disable_device(pdev);
2583 iounmap(card->ctlmem);
2584 iounmap(card->mem);
2585 kfree(card);
2586 return -ENOMEM;
2587 } 2569 }
2588 card->tx_dma_handle_host = 2570 card->tx_dma_handle_host =
2589 pci_alloc_consistent(card->device, FST_MAX_MTU, 2571 pci_alloc_consistent(card->device, FST_MAX_MTU,
2590 &card->tx_dma_handle_card); 2572 &card->tx_dma_handle_card);
2591 if (card->tx_dma_handle_host == NULL) { 2573 if (card->tx_dma_handle_host == NULL) {
2592 pr_err("Could not allocate tx dma buffer\n"); 2574 pr_err("Could not allocate tx dma buffer\n");
2593 fst_disable_intr(card); 2575 err = -ENOMEM;
2594 pci_release_regions(pdev); 2576 goto tx_dma_fail;
2595 pci_disable_device(pdev);
2596 iounmap(card->ctlmem);
2597 iounmap(card->mem);
2598 kfree(card);
2599 return -ENOMEM;
2600 } 2577 }
2601 } 2578 }
2602 return 0; /* Success */ 2579 return 0; /* Success */
2580
2581tx_dma_fail:
2582 pci_free_consistent(card->device, FST_MAX_MTU,
2583 card->rx_dma_handle_host,
2584 card->rx_dma_handle_card);
2585rx_dma_fail:
2586 fst_disable_intr(card);
2587 for (i = 0 ; i < card->nports ; i++)
2588 unregister_hdlc_device(card->ports[i].dev);
2589init_card_fail:
2590 fst_card_array[card->card_no] = NULL;
2591card_array_fail:
2592 for (i = 0 ; i < card->nports ; i++)
2593 free_netdev(card->ports[i].dev);
2594hdlcdev_fail:
2595 free_irq(card->irq, card);
2596irq_fail:
2597 iounmap(card->ctlmem);
2598ioremap_ctlmem_fail:
2599 iounmap(card->mem);
2600ioremap_physmem_fail:
2601 pci_release_regions(pdev);
2602regions_fail:
2603 pci_disable_device(pdev);
2604enable_fail:
2605 kfree(card);
2606 return err;
2603} 2607}
2604 2608
2605/* 2609/*
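
The farsync hunks replace repeated hand-rolled cleanup with a single reverse-order goto ladder, and fst_init_card() now reports failure instead of silently shrinking nports. A minimal sketch of the unwind pattern; demo_probe() and its resources are hypothetical:

/* Sketch of the reverse-order unwind ladder that fst_add_one() now uses.
 * Each failure jumps to a label that releases only what has already been
 * acquired, so every error path frees exactly the right resources.
 */
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/slab.h>

static int demo_probe(struct pci_dev *pdev)
{
	void __iomem *mem;
	void *card;
	int err;

	card = kzalloc(128, GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err)
		goto enable_fail;

	err = pci_request_regions(pdev, "demo");
	if (err)
		goto regions_fail;

	mem = pci_iomap(pdev, 2, 0);
	if (!mem) {
		err = -ENODEV;
		goto iomap_fail;
	}

	/* ... rest of the setup; each later step adds its own label above
	 * iomap_fail.  In real code, stash card via pci_set_drvdata().
	 */
	return 0;

iomap_fail:
	pci_release_regions(pdev);
regions_fail:
	pci_disable_device(pdev);
enable_fail:
	kfree(card);
	return err;
}
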
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2ccb4a02368b..055222bae6e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1439 unsigned int i = 0; 1439 unsigned int i = 0;
1440 unsigned int num_queues = info->netdev->real_num_tx_queues; 1440 unsigned int num_queues = info->netdev->real_num_tx_queues;
1441 1441
1442 netif_carrier_off(info->netdev);
1443
1442 for (i = 0; i < num_queues; ++i) { 1444 for (i = 0; i < num_queues; ++i) {
1443 struct netfront_queue *queue = &info->queues[i]; 1445 struct netfront_queue *queue = &info->queues[i];
1444 1446
1445 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1446 spin_lock_bh(&queue->rx_lock);
1447 spin_lock_irq(&queue->tx_lock);
1448 netif_carrier_off(queue->info->netdev);
1449 spin_unlock_irq(&queue->tx_lock);
1450 spin_unlock_bh(&queue->rx_lock);
1451
1452 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 1447 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1453 unbind_from_irqhandler(queue->tx_irq, queue); 1448 unbind_from_irqhandler(queue->tx_irq, queue);
1454 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { 1449 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1458 queue->tx_evtchn = queue->rx_evtchn = 0; 1453 queue->tx_evtchn = queue->rx_evtchn = 0;
1459 queue->tx_irq = queue->rx_irq = 0; 1454 queue->tx_irq = queue->rx_irq = 0;
1460 1455
1456 napi_synchronize(&queue->napi);
1457
1461 /* End access and free the pages */ 1458 /* End access and free the pages */
1462 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); 1459 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1463 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); 1460 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
2046 /* By now, the queue structures have been set up */ 2043 /* By now, the queue structures have been set up */
2047 for (j = 0; j < num_queues; ++j) { 2044 for (j = 0; j < num_queues; ++j) {
2048 queue = &np->queues[j]; 2045 queue = &np->queues[j];
2049 spin_lock_bh(&queue->rx_lock);
2050 spin_lock_irq(&queue->tx_lock);
2051 2046
2052 /* Step 1: Discard all pending TX packet fragments. */ 2047 /* Step 1: Discard all pending TX packet fragments. */
2048 spin_lock_irq(&queue->tx_lock);
2053 xennet_release_tx_bufs(queue); 2049 xennet_release_tx_bufs(queue);
2050 spin_unlock_irq(&queue->tx_lock);
2054 2051
2055 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ 2052 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
2053 spin_lock_bh(&queue->rx_lock);
2054
2056 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 2055 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
2057 skb_frag_t *frag; 2056 skb_frag_t *frag;
2058 const struct page *page; 2057 const struct page *page;
@@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
2076 } 2075 }
2077 2076
2078 queue->rx.req_prod_pvt = requeue_idx; 2077 queue->rx.req_prod_pvt = requeue_idx;
2078
2079 spin_unlock_bh(&queue->rx_lock);
2079 } 2080 }
2080 2081
2081 /* 2082 /*
@@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
2087 netif_carrier_on(np->netdev); 2088 netif_carrier_on(np->netdev);
2088 for (j = 0; j < num_queues; ++j) { 2089 for (j = 0; j < num_queues; ++j) {
2089 queue = &np->queues[j]; 2090 queue = &np->queues[j];
2091
2090 notify_remote_via_irq(queue->tx_irq); 2092 notify_remote_via_irq(queue->tx_irq);
2091 if (queue->tx_irq != queue->rx_irq) 2093 if (queue->tx_irq != queue->rx_irq)
2092 notify_remote_via_irq(queue->rx_irq); 2094 notify_remote_via_irq(queue->rx_irq);
2093 xennet_tx_buf_gc(queue);
2094 xennet_alloc_rx_buffers(queue);
2095 2095
2096 spin_lock_irq(&queue->tx_lock);
2097 xennet_tx_buf_gc(queue);
2096 spin_unlock_irq(&queue->tx_lock); 2098 spin_unlock_irq(&queue->tx_lock);
2099
2100 spin_lock_bh(&queue->rx_lock);
2101 xennet_alloc_rx_buffers(queue);
2097 spin_unlock_bh(&queue->rx_lock); 2102 spin_unlock_bh(&queue->rx_lock);
2098 } 2103 }
2099 2104
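
The xen-netfront hunks split the old combined rx_lock/tx_lock critical sections: TX cleanup now runs under tx_lock alone, RX refill under rx_lock alone, and napi_synchronize() ensures no poll is in flight before the rings are freed. A minimal sketch of that discipline; struct demo_queue and the helpers are hypothetical stand-ins for struct netfront_queue and its functions:

/* Sketch of the narrowed lock scopes used in xennet_connect() and the
 * napi_synchronize() added to xennet_disconnect_backend().
 */
#include <linux/spinlock.h>
#include <linux/netdevice.h>

struct demo_queue {
	spinlock_t tx_lock;	/* taken from the TX IRQ handler   */
	spinlock_t rx_lock;	/* taken from NAPI poll (softirq)  */
	struct napi_struct napi;
};

static void demo_release_tx(struct demo_queue *q) { }
static void demo_refill_rx(struct demo_queue *q)  { }

static void demo_requeue(struct demo_queue *q)
{
	/* TX and RX no longer share one big critical section. */
	spin_lock_irq(&q->tx_lock);
	demo_release_tx(q);
	spin_unlock_irq(&q->tx_lock);

	spin_lock_bh(&q->rx_lock);
	demo_refill_rx(q);
	spin_unlock_bh(&q->rx_lock);
}

static void demo_teardown(struct demo_queue *q)
{
	/* Make sure no poll is still running before freeing ring pages. */
	napi_synchronize(&q->napi);
	/* ... end grant access and free the TX/RX rings here ... */
}
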