Diffstat (limited to 'drivers/net/ethernet')
38 files changed, 388 insertions, 354 deletions
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index cab306a9888e..e1d26433d619 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev)
         struct ei_device *ei_local;
         struct ax_device *ax;
         struct resource *irq, *mem, *mem2;
-        resource_size_t mem_size, mem2_size = 0;
+        unsigned long mem_size, mem2_size = 0;
         int ret = 0;

         dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 829b5ad71d0d..b5fd934585e9 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -186,7 +186,7 @@ struct atl1e_tpd_desc {
 /* how about 0x2000 */
 #define MAX_TX_BUF_LEN 0x2000
 #define MAX_TX_BUF_SHIFT 13
-/*#define MAX_TX_BUF_LEN 0x3000 */
+#define MAX_TSO_SEG_SIZE 0x3c00

 /* rrs word 1 bit 0:31 */
 #define RRS_RX_CSUM_MASK 0xFFFF
@@ -438,7 +438,6 @@ struct atl1e_adapter {
         struct atl1e_hw hw;
         struct atl1e_hw_stats hw_stats;

-        bool have_msi;
         u32 wol;
         u16 link_speed;
         u16 link_duplex;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 92f4734f860d..ac25f05ff68f 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
         struct net_device *netdev = adapter->netdev;

         free_irq(adapter->pdev->irq, netdev);
-
-        if (adapter->have_msi)
-                pci_disable_msi(adapter->pdev);
 }

 static int atl1e_request_irq(struct atl1e_adapter *adapter)
 {
         struct pci_dev *pdev = adapter->pdev;
         struct net_device *netdev = adapter->netdev;
-        int flags = 0;
         int err = 0;

-        adapter->have_msi = true;
-        err = pci_enable_msi(pdev);
-        if (err) {
-                netdev_dbg(netdev,
-                           "Unable to allocate MSI interrupt Error: %d\n", err);
-                adapter->have_msi = false;
-        }
-
-        if (!adapter->have_msi)
-                flags |= IRQF_SHARED;
-        err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
+        err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
+                          netdev);
         if (err) {
                 netdev_dbg(adapter->netdev,
                            "Unable to allocate interrupt Error: %d\n", err);
-                if (adapter->have_msi)
-                        pci_disable_msi(pdev);
                 return err;
         }
         netdev_dbg(netdev, "atl1e_request_irq OK\n");
@@ -2344,6 +2329,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

         INIT_WORK(&adapter->reset_task, atl1e_reset_task);
         INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
+        netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
         err = register_netdev(netdev);
         if (err) {
                 netdev_err(netdev, "register netdevice failed\n");
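Note: the probe hunk above caps the GSO segment size with netif_set_gso_max_size(), apparently so TSO segments stay within what the atl1e hardware can transmit correctly now that MSI support is gone. A hedged sketch of the call-site pattern (the helper name is illustrative; only netif_set_gso_max_size() and the 0x3c00 value come from the diff):

/* Cap GSO segments before register_netdev() so the stack never hands
 * the driver a segment larger than the hardware copes with. */
static void example_limit_tso(struct net_device *netdev)
{
        netif_set_gso_max_size(netdev, 0x3c00);   /* MAX_TSO_SEG_SIZE above */
}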
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4046f97378c2..57619dd4a92b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                 }
         }

+        /* initialize FW coalescing state machines in RAM */
+        bnx2x_update_coalesce(bp);
+
         /* setup the leading queue */
         rc = bnx2x_setup_leading(bp);
         if (rc) {
@@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
         u32 addr = BAR_CSTRORM_INTMEM +
                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
-        u16 flags = REG_RD16(bp, addr);
+        u8 flags = REG_RD8(bp, addr);
         /* clear and set */
         flags &= ~HC_INDEX_DATA_HC_ENABLED;
         flags |= enable_flag;
-        REG_WR16(bp, addr, flags);
+        REG_WR8(bp, addr, flags);
         DP(NETIF_MSG_IFUP,
            "port %x fw_sb_id %d sb_index %d disable %d\n",
            port, fw_sb_id, sb_index, disable);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 77ebae0ac64a..0283f343b0d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 {
         struct bnx2x *bp = params->bp;
         u16 base_page, next_page, not_kr2_device, lane;
-        int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
-
-        if (!sigdet) {
-                if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
-                        bnx2x_kr2_recovery(params, vars, phy);
-                return;
-        }
+        int sigdet;

         /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
          * since some switches tend to reinit the AN process and clear the
@@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
                 vars->check_kr2_recovery_cnt--;
                 return;
         }
+
+        sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+        if (!sigdet) {
+                if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+                        bnx2x_kr2_recovery(params, vars, phy);
+                        DP(NETIF_MSG_LINK, "No sigdet\n");
+                }
+                return;
+        }
+
         lane = bnx2x_get_warpcore_lane(phy, params);
         CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
                           MDIO_AER_BLOCK_AER_REG, lane);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e81a747ea8ce..c50696b396f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
                           q);
         }

-        if (!NO_FCOE(bp)) {
+        if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
                 fp = &bp->fp[FCOE_IDX(bp)];
                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

@@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                         REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
                 }
         }
+        if (!CHIP_IS_E1x(bp))
+                /* block FW from writing to host */
+                REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+
         /* wait until BRB is empty */
         tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
         while (timer_count) {
@@ -13354,6 +13358,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
         RCU_INIT_POINTER(bp->cnic_ops, NULL);
         mutex_unlock(&bp->cnic_mutex);
         synchronize_rcu();
+        bp->cnic_enabled = false;
         kfree(bp->cnic_kwq);
         bp->cnic_kwq = NULL;

diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 67d2663b3974..17a972734ba7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14604,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp)
                 if (j + len > block_end)
                         goto partno;

-                memcpy(tp->fw_ver, &vpd_data[j], len);
-                strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
+                if (len >= sizeof(tp->fw_ver))
+                        len = sizeof(tp->fw_ver) - 1;
+                memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+                snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
+                         &vpd_data[j]);
         }

 partno:
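Note: the tg3 hunk above replaces an unbounded memcpy()+strncat() of the VPD firmware-version field with a length-clamped snprintf() using the "%.*s" precision argument. A minimal standalone illustration of that idiom (hypothetical buffer and data, not the driver's code):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char fw_ver[16];                                /* stands in for tp->fw_ver */
        const char vpd[] = { 'v', '1', '.', '2', '3' }; /* VPD field, not NUL-terminated */
        int len = sizeof(vpd);

        /* clamp len so the copy can never overrun fw_ver */
        if (len >= (int)sizeof(fw_ver))
                len = sizeof(fw_ver) - 1;
        memset(fw_ver, 0, sizeof(fw_ver));
        /* "%.*s" reads at most len bytes; snprintf() bounds the whole result */
        snprintf(fw_ver, sizeof(fw_ver), "%.*s bc ", len, vpd);
        printf("\"%s\"\n", fw_ver);                     /* prints "v1.23 bc " */
        return 0;
}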
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index a170065b5973..b0ebc9f6d55e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -163,6 +163,7 @@
 #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */

 /* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */
 #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
 #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */

@@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev)
         writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
         writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

+        /* Mask power mgt interrupt */
+        writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
+
         /* XGMAC requires AXI bus init. This is a 'magic number' for now */
         writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);

@@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
                 struct sk_buff *skb;
                 int frame_len;

+                if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
+                        break;
+
                 entry = priv->rx_tail;
                 p = priv->dma_rx + entry;
                 if (desc_get_owner(p))
@@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
         unsigned int pmt = 0;

         if (mode & WAKE_MAGIC)
-                pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+                pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
         if (mode & WAKE_UCAST)
                 pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;

diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8cdf02503d13..9eada8e86078 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
                 tmp = readl(reg);
 }

+/*
+ * Sleep, either by using msleep() or if we are suspending, then
+ * use mdelay() to sleep.
+ */
+static void dm9000_msleep(board_info_t *db, unsigned int ms)
+{
+        if (db->in_suspend)
+                mdelay(ms);
+        else
+                msleep(ms);
+}
+
+/* Read a word from phyxcer */
+static int
+dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
+{
+        board_info_t *db = netdev_priv(dev);
+        unsigned long flags;
+        unsigned int reg_save;
+        int ret;
+
+        mutex_lock(&db->addr_lock);
+
+        spin_lock_irqsave(&db->lock, flags);
+
+        /* Save previous register address */
+        reg_save = readb(db->io_addr);
+
+        /* Fill the phyxcer register into REG_0C */
+        iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+        /* Issue phyxcer read command */
+        iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
+
+        writeb(reg_save, db->io_addr);
+        spin_unlock_irqrestore(&db->lock, flags);
+
+        dm9000_msleep(db, 1); /* Wait read complete */
+
+        spin_lock_irqsave(&db->lock, flags);
+        reg_save = readb(db->io_addr);
+
+        iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
+
+        /* The read data keeps on REG_0D & REG_0E */
+        ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+
+        /* restore the previous address */
+        writeb(reg_save, db->io_addr);
+        spin_unlock_irqrestore(&db->lock, flags);
+
+        mutex_unlock(&db->addr_lock);
+
+        dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
+        return ret;
+}
+
+/* Write a word to phyxcer */
+static void
+dm9000_phy_write(struct net_device *dev,
+                 int phyaddr_unused, int reg, int value)
+{
+        board_info_t *db = netdev_priv(dev);
+        unsigned long flags;
+        unsigned long reg_save;
+
+        dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
+        mutex_lock(&db->addr_lock);
+
+        spin_lock_irqsave(&db->lock, flags);
+
+        /* Save previous register address */
+        reg_save = readb(db->io_addr);
+
+        /* Fill the phyxcer register into REG_0C */
+        iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+        /* Fill the written data into REG_0D & REG_0E */
+        iow(db, DM9000_EPDRL, value);
+        iow(db, DM9000_EPDRH, value >> 8);
+
+        /* Issue phyxcer write command */
+        iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
+
+        writeb(reg_save, db->io_addr);
+        spin_unlock_irqrestore(&db->lock, flags);
+
+        dm9000_msleep(db, 1); /* Wait write complete */
+
+        spin_lock_irqsave(&db->lock, flags);
+        reg_save = readb(db->io_addr);
+
+        iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
+
+        /* restore the previous address */
+        writeb(reg_save, db->io_addr);
+
+        spin_unlock_irqrestore(&db->lock, flags);
+        mutex_unlock(&db->addr_lock);
+}
+
 /* dm9000_set_io
  *
  * select the specified set of io routines to use with the
@@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev)

         iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */

+        dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
+        dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
+
         ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;

         /* if wol is needed, then always set NCR_WAKEEN otherwise we end
@@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev)
         return 0;
 }

-/*
- * Sleep, either by using msleep() or if we are suspending, then
- * use mdelay() to sleep.
- */
-static void dm9000_msleep(board_info_t *db, unsigned int ms)
-{
-        if (db->in_suspend)
-                mdelay(ms);
-        else
-                msleep(ms);
-}
-
-/*
- * Read a word from phyxcer
- */
-static int
-dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
-{
-        board_info_t *db = netdev_priv(dev);
-        unsigned long flags;
-        unsigned int reg_save;
-        int ret;
-
-        mutex_lock(&db->addr_lock);
-
-        spin_lock_irqsave(&db->lock,flags);
-
-        /* Save previous register address */
-        reg_save = readb(db->io_addr);
-
-        /* Fill the phyxcer register into REG_0C */
-        iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
-        iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
-
-        writeb(reg_save, db->io_addr);
-        spin_unlock_irqrestore(&db->lock,flags);
-
-        dm9000_msleep(db, 1); /* Wait read complete */
-
-        spin_lock_irqsave(&db->lock,flags);
-        reg_save = readb(db->io_addr);
-
-        iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
-
-        /* The read data keeps on REG_0D & REG_0E */
-        ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
-
-        /* restore the previous address */
-        writeb(reg_save, db->io_addr);
-        spin_unlock_irqrestore(&db->lock,flags);
-
-        mutex_unlock(&db->addr_lock);
-
-        dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
-        return ret;
-}
-
-/*
- * Write a word to phyxcer
- */
-static void
-dm9000_phy_write(struct net_device *dev,
-                 int phyaddr_unused, int reg, int value)
-{
-        board_info_t *db = netdev_priv(dev);
-        unsigned long flags;
-        unsigned long reg_save;
-
-        dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
-        mutex_lock(&db->addr_lock);
-
-        spin_lock_irqsave(&db->lock,flags);
-
-        /* Save previous register address */
-        reg_save = readb(db->io_addr);
-
-        /* Fill the phyxcer register into REG_0C */
-        iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
-        /* Fill the written data into REG_0D & REG_0E */
-        iow(db, DM9000_EPDRL, value);
-        iow(db, DM9000_EPDRH, value >> 8);
-
-        iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
-
-        writeb(reg_save, db->io_addr);
-        spin_unlock_irqrestore(&db->lock, flags);
-
-        dm9000_msleep(db, 1); /* Wait write complete */
-
-        spin_lock_irqsave(&db->lock,flags);
-        reg_save = readb(db->io_addr);
-
-        iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
-
-        /* restore the previous address */
-        writeb(reg_save, db->io_addr);
-
-        spin_unlock_irqrestore(&db->lock, flags);
-        mutex_unlock(&db->addr_lock);
-}
-
 static void
 dm9000_shutdown(struct net_device *dev)
 {
@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)
         db->flags |= DM9000_PLATF_SIMPLE_PHY;
 #endif

-        dm9000_reset(db);
+        /* Fixing bug on dm9000_probe, takeover dm9000_reset(db),
+         * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
+         * while probe stage.
+         */
+
+        iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);

         /* try multiple times, DM9000 sometimes gets the read wrong */
         for (i = 0; i < 8; i++) {
diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h
index 55688bd1a3ef..9ce058adabab 100644
--- a/drivers/net/ethernet/davicom/dm9000.h
+++ b/drivers/net/ethernet/davicom/dm9000.h
@@ -69,7 +69,9 @@
 #define NCR_WAKEEN (1<<6)
 #define NCR_FCOL (1<<4)
 #define NCR_FDX (1<<3)
-#define NCR_LBK (3<<1)
+
+#define NCR_RESERVED (3<<1)
+#define NCR_MAC_LBK (1<<1)
 #define NCR_RST (1<<0)

 #define NSR_SPEED (1<<7)
@@ -167,5 +169,12 @@
 #define ISR_LNKCHNG (1<<5)
 #define ISR_UNDERRUN (1<<4)

+/* Davicom MII registers.
+ */
+
+#define MII_DM_DSPCR 0x1b /* DSP Control Register */
+
+#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */
+
 #endif /* _DM9000X_H_ */

diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08e54f3d288b..2886c9b63f90 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,

         if (vlan_tx_tag_present(skb)) {
                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
-                __vlan_put_tag(skb, vlan_tag);
-                skb->vlan_tci = 0;
+                skb = __vlan_put_tag(skb, vlan_tag);
+                if (skb)
+                        skb->vlan_tci = 0;
         }

         return skb;
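Note: __vlan_put_tag() may free the skb it was handed and return NULL on failure, which is why the hunk above switches to continuing with its return value. A hedged sketch of the calling pattern (kernel 3.9-era two-argument signature, as used in the diff; the wrapper name is illustrative):

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Push the VLAN tag into the packet payload. On failure the original
 * skb has already been freed, so only the returned pointer may be used. */
static struct sk_buff *example_insert_vlan(struct sk_buff *skb, u16 vlan_tag)
{
        skb = __vlan_put_tag(skb, vlan_tag);
        if (!skb)
                return NULL;       /* skb was consumed on error */
        skb->vlan_tci = 0;         /* tag now lives in the packet itself */
        return skb;
}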
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 911d0253dbb2..73195f643c9c 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         return NETDEV_TX_OK;
 }

+/* Init RX & TX buffer descriptors
+ */
+static void fec_enet_bd_init(struct net_device *dev)
+{
+        struct fec_enet_private *fep = netdev_priv(dev);
+        struct bufdesc *bdp;
+        unsigned int i;
+
+        /* Initialize the receive buffer descriptors. */
+        bdp = fep->rx_bd_base;
+        for (i = 0; i < RX_RING_SIZE; i++) {
+
+                /* Initialize the BD for every fragment in the page. */
+                if (bdp->cbd_bufaddr)
+                        bdp->cbd_sc = BD_ENET_RX_EMPTY;
+                else
+                        bdp->cbd_sc = 0;
+                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+        }
+
+        /* Set the last buffer to wrap */
+        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp->cbd_sc |= BD_SC_WRAP;
+
+        fep->cur_rx = fep->rx_bd_base;
+
+        /* ...and the same for transmit */
+        bdp = fep->tx_bd_base;
+        fep->cur_tx = bdp;
+        for (i = 0; i < TX_RING_SIZE; i++) {
+
+                /* Initialize the BD for every fragment in the page. */
+                bdp->cbd_sc = 0;
+                if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+                        dev_kfree_skb_any(fep->tx_skbuff[i]);
+                        fep->tx_skbuff[i] = NULL;
+                }
+                bdp->cbd_bufaddr = 0;
+                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+        }
+
+        /* Set the last buffer to wrap */
+        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp->cbd_sc |= BD_SC_WRAP;
+        fep->dirty_tx = bdp;
+}
+
 /* This function is called to start or restart the FEC during a link
  * change. This only happens when switching between half and full
  * duplex.
@@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex)
         /* Set maximum receive buffer size. */
         writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

+        fec_enet_bd_init(ndev);
+
         /* Set receive and transmit descriptor base. */
         writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
         if (fep->bufdesc_ex)
@@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex)
                 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
                         * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);

-        fep->cur_rx = fep->rx_bd_base;

         for (i = 0; i <= TX_RING_MOD_MASK; i++) {
                 if (fep->tx_skbuff[i]) {
@@ -954,6 +1002,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
         } else {
                 if (fep->link) {
                         fec_stop(ndev);
+                        fep->link = phy_dev->link;
                         status_change = 1;
                 }
         }
@@ -1597,8 +1646,6 @@ static int fec_enet_init(struct net_device *ndev)
 {
         struct fec_enet_private *fep = netdev_priv(ndev);
         struct bufdesc *cbd_base;
-        struct bufdesc *bdp;
-        unsigned int i;

         /* Allocate memory for buffer descriptors. */
         cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1608,6 +1655,7 @@ static int fec_enet_init(struct net_device *ndev)
                 return -ENOMEM;
         }

+        memset(cbd_base, 0, PAGE_SIZE);
         spin_lock_init(&fep->hw_lock);

         fep->netdev = ndev;
@@ -1631,35 +1679,6 @@ static int fec_enet_init(struct net_device *ndev)
         writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
         netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);

-        /* Initialize the receive buffer descriptors. */
-        bdp = fep->rx_bd_base;
-        for (i = 0; i < RX_RING_SIZE; i++) {
-
-                /* Initialize the BD for every fragment in the page. */
-                bdp->cbd_sc = 0;
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-        }
-
-        /* Set the last buffer to wrap */
-        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-        bdp->cbd_sc |= BD_SC_WRAP;
-
-        /* ...and the same for transmit */
-        bdp = fep->tx_bd_base;
-        fep->cur_tx = bdp;
-        for (i = 0; i < TX_RING_SIZE; i++) {
-
-                /* Initialize the BD for every fragment in the page. */
-                bdp->cbd_sc = 0;
-                bdp->cbd_bufaddr = 0;
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-        }
-
-        /* Set the last buffer to wrap */
-        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-        bdp->cbd_sc |= BD_SC_WRAP;
-        fep->dirty_tx = bdp;
-
         fec_restart(ndev, 0);

         return 0;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ec800b093e7e..d2bea3f07c73 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -870,7 +870,7 @@ err_unlock:
 }

 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
-        void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+        int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
 {
         struct cb *cb;
         unsigned long flags;
@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
         nic->cbs_avail--;
         cb->skb = skb;

+        err = cb_prepare(nic, cb, skb);
+        if (err)
+                goto err_unlock;
+
         if (unlikely(!nic->cbs_avail))
                 err = -ENOSPC;

-        cb_prepare(nic, cb, skb);

         /* Order is important otherwise we'll be in a race with h/w:
          * set S-bit in current first, then clear S-bit in previous. */
@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic)
         nic->mii.mdio_write = mdio_write;
 }

-static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
         struct config *config = &cb->u.config;
         u8 *c = (u8 *)config;
@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
         netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
                      "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
                      c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+        return 0;
 }

 /*************************************************************************
@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
         return fw;
 }

-static void e100_setup_ucode(struct nic *nic, struct cb *cb,
+static int e100_setup_ucode(struct nic *nic, struct cb *cb,
                              struct sk_buff *skb)
 {
         const struct firmware *fw = (void *)skb;
@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb,
         cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

         cb->command = cpu_to_le16(cb_ucode | cb_el);
+        return 0;
 }

 static inline int e100_load_ucode_wait(struct nic *nic)
@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic)
         return err;
 }

-static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
+static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
                               struct sk_buff *skb)
 {
         cb->command = cpu_to_le16(cb_iaaddr);
         memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
+        return 0;
 }

-static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
         cb->command = cpu_to_le16(cb_dump);
         cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
                 offsetof(struct mem, dump_buf));
+        return 0;
 }

 static int e100_phy_check_without_mii(struct nic *nic)
@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic)
         return 0;
 }

-static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
         struct net_device *netdev = nic->netdev;
         struct netdev_hw_addr *ha;
@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
                 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
                         ETH_ALEN);
         }
+        return 0;
 }

 static void e100_set_multicast_list(struct net_device *netdev)
@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data)
                   round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
 }

-static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
+static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
         struct sk_buff *skb)
 {
+        dma_addr_t dma_addr;
         cb->command = nic->tx_command;

+        dma_addr = pci_map_single(nic->pdev,
+                                  skb->data, skb->len, PCI_DMA_TODEVICE);
+        /* If we can't map the skb, have the upper layer try later */
+        if (pci_dma_mapping_error(nic->pdev, dma_addr))
+                return -ENOMEM;
+
         /*
          * Use the last 4 bytes of the SKB payload packet as the CRC, used for
          * testing, ie sending frames with bad CRC.
@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
         cb->u.tcb.tcb_byte_count = 0;
         cb->u.tcb.threshold = nic->tx_threshold;
         cb->u.tcb.tbd_count = 1;
-        cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
-                skb->data, skb->len, PCI_DMA_TODEVICE));
-        /* check for mapping failure? */
+        cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
         cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
         skb_tx_timestamp(skb);
+        return 0;
 }

 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
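Note: the e100 hunks above change the cb_prepare() callbacks from void to int so a DMA mapping failure can be reported before the command block is armed, instead of handing the hardware an unchecked address. A hedged sketch of the underlying check (the helper name is illustrative, not the driver's exact code; the legacy PCI DMA API matches the calls in the diff):

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map a transmit buffer and report failure instead of programming a
 * bogus address; the caller can then drop the frame or retry later. */
static int example_map_tx(struct pci_dev *pdev, struct sk_buff *skb,
                          dma_addr_t *dma_addr)
{
        *dma_addr = pci_map_single(pdev, skb->data, skb->len,
                                   PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(pdev, *dma_addr))
                return -ENOMEM;    /* upper layer tries again later */
        return 0;
}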
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 43462d596a4e..ffd287196bf8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                 txdr->buffer_info[i].dma =
                         dma_map_single(&pdev->dev, skb->data, skb->len,
                                        DMA_TO_DEVICE);
+                if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
+                        ret_val = 4;
+                        goto err_nomem;
+                }
                 tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
                 tx_desc->lower.data = cpu_to_le32(skb->len);
                 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
         rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
                                     GFP_KERNEL);
         if (!rxdr->buffer_info) {
-                ret_val = 4;
+                ret_val = 5;
                 goto err_nomem;
         }

@@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
                                         GFP_KERNEL);
         if (!rxdr->desc) {
-                ret_val = 5;
+                ret_val = 6;
                 goto err_nomem;
         }
         memset(rxdr->desc, 0, rxdr->size);
@@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)

                 skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
                 if (!skb) {
-                        ret_val = 6;
+                        ret_val = 7;
                         goto err_nomem;
                 }
                 skb_reserve(skb, NET_IP_ALIGN);
@@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                 rxdr->buffer_info[i].dma =
                         dma_map_single(&pdev->dev, skb->data,
                                        E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
+                if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
+                        ret_val = 8;
+                        goto err_nomem;
+                }
                 rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
                 memset(skb->data, 0x00, skb->len);
         }
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 948b86ffa4f0..7e615e2bf7e6 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -848,11 +848,16 @@ check_page:
                         }
                 }

-                if (!buffer_info->dma)
+                if (!buffer_info->dma) {
                         buffer_info->dma = dma_map_page(&pdev->dev,
                                                         buffer_info->page, 0,
                                                         PAGE_SIZE,
                                                         DMA_FROM_DEVICE);
+                        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+                                adapter->alloc_rx_buff_failed++;
+                                break;
+                        }
+                }

                 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
                 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 25151401c2ab..ab577a763a20 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -284,18 +284,10 @@ struct igb_q_vector {
 enum e1000_ring_flags_t {
         IGB_RING_FLAG_RX_SCTP_CSUM,
         IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
-        IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
         IGB_RING_FLAG_TX_CTX_IDX,
         IGB_RING_FLAG_TX_DETECT_HANG
 };

-#define ring_uses_build_skb(ring) \
-        test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define set_ring_build_skb_enabled(ring) \
-        set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define clear_ring_build_skb_enabled(ring) \
-        clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-
 #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)

 #define IGB_RX_DESC(R, i) \
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8496adfc6a68..64f75291e3a5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
         wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }

-static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
-                                  struct igb_ring *rx_ring)
-{
-#define IGB_MAX_BUILD_SKB_SIZE \
-        (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
-         (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
-
-        /* set build_skb flag */
-        if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
-                set_ring_build_skb_enabled(rx_ring);
-        else
-                clear_ring_build_skb_enabled(rx_ring);
-}
-
 /**
  * igb_configure_rx - Configure receive Unit after Reset
  * @adapter: board private structure
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)

         /* Setup the HW Rx Head and Tail Descriptor Pointers and
          * the Base and Length of the Rx Descriptor Ring */
-        for (i = 0; i < adapter->num_rx_queues; i++) {
-                struct igb_ring *rx_ring = adapter->rx_ring[i];
-                igb_set_rx_buffer_len(adapter, rx_ring);
-                igb_configure_rx_ring(adapter, rx_ring);
-        }
+        for (i = 0; i < adapter->num_rx_queues; i++)
+                igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }

 /**
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
         return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }

-static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
-                                           union e1000_adv_rx_desc *rx_desc)
-{
-        struct igb_rx_buffer *rx_buffer;
-        struct sk_buff *skb;
-        struct page *page;
-        void *page_addr;
-        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
-        unsigned int truesize = IGB_RX_BUFSZ;
-#else
-        unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-                                SKB_DATA_ALIGN(NET_SKB_PAD +
-                                               NET_IP_ALIGN +
-                                               size);
-#endif
-
-        /* If we spanned a buffer we have a huge mess so test for it */
-        BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
-
-        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-        page = rx_buffer->page;
-        prefetchw(page);
-
-        page_addr = page_address(page) + rx_buffer->page_offset;
-
-        /* prefetch first cache line of first page */
-        prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
-#if L1_CACHE_BYTES < 128
-        prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
-#endif
-
-        /* build an skb to around the page buffer */
-        skb = build_skb(page_addr, truesize);
-        if (unlikely(!skb)) {
-                rx_ring->rx_stats.alloc_failed++;
-                return NULL;
-        }
-
-        /* we are reusing so sync this buffer for CPU use */
-        dma_sync_single_range_for_cpu(rx_ring->dev,
-                                      rx_buffer->dma,
-                                      rx_buffer->page_offset,
-                                      IGB_RX_BUFSZ,
-                                      DMA_FROM_DEVICE);
-
-        /* update pointers within the skb to store the data */
-        skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
-        __skb_put(skb, size);
-
-        /* pull timestamp out of packet data */
-        if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-                igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
-                __skb_pull(skb, IGB_TS_HDR_LEN);
-        }
-
-        if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
-                /* hand second half of page back to the ring */
-                igb_reuse_rx_page(rx_ring, rx_buffer);
-        } else {
-                /* we are not reusing the buffer so unmap it */
-                dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-                               PAGE_SIZE, DMA_FROM_DEVICE);
-        }
-
-        /* clear contents of buffer_info */
-        rx_buffer->dma = 0;
-        rx_buffer->page = NULL;
-
-        return skb;
-}
-
 static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
                                            union e1000_adv_rx_desc *rx_desc,
                                            struct sk_buff *skb)
@@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                 rmb();

                 /* retrieve a buffer from the ring */
-                if (ring_uses_build_skb(rx_ring))
-                        skb = igb_build_rx_buffer(rx_ring, rx_desc);
-                else
-                        skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+                skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);

                 /* exit if we failed to retrieve a buffer */
                 if (!skb)
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
         return true;
 }

-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
-        if (ring_uses_build_skb(rx_ring))
-                return NET_SKB_PAD + NET_IP_ALIGN;
-        else
-                return 0;
-}
-
 /**
  * igb_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
                  * Refresh the desc even if buffer_addrs didn't change
                  * because each write-back erases this info.
                  */
-                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
-                                                     bi->page_offset +
-                                                     igb_rx_offset(rx_ring));
+                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

                 rx_desc++;
                 bi++;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index ea4808373435..b5f94abe3cff 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -2159,6 +2159,10 @@ map_skb:
                                    skb->data,
                                    adapter->rx_buffer_len,
                                    DMA_FROM_DEVICE);
+                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+                        adapter->alloc_rx_buff_failed++;
+                        break;
+                }

                 rx_desc = IXGB_RX_DESC(*rx_ring, i);
                 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2168,7 +2172,8 @@ map_skb:
                 rx_desc->status = 0;


-                if (++i == rx_ring->count) i = 0;
+                if (++i == rx_ring->count)
+                        i = 0;
                 buffer_info = &rx_ring->buffer_info[i];
         }

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index db5611ae407e..79f4a26ea6cc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7922,12 +7922,19 @@ static int __init ixgbe_init_module(void)
         ixgbe_dbg_init();
 #endif /* CONFIG_DEBUG_FS */

+        ret = pci_register_driver(&ixgbe_driver);
+        if (ret) {
+#ifdef CONFIG_DEBUG_FS
+                ixgbe_dbg_exit();
+#endif /* CONFIG_DEBUG_FS */
+                return ret;
+        }
+
 #ifdef CONFIG_IXGBE_DCA
         dca_register_notify(&dca_notifier);
 #endif

-        ret = pci_register_driver(&ixgbe_driver);
-        return ret;
+        return 0;
 }

 module_init(ixgbe_init_module);
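Note: the hunk above makes ixgbe_init_module() unwind its debugfs setup when pci_register_driver() fails, rather than returning the error with the debugfs root left behind. A hedged sketch of that init/unwind ordering (the driver struct and dbg helpers are placeholders; only pci_register_driver() is a real API):

/* Generic module-init pattern: undo earlier setup on a later failure. */
static int __init example_init_module(void)
{
        int ret;

        example_dbg_init();                     /* e.g. create debugfs entries */

        ret = pci_register_driver(&example_pci_driver);
        if (ret) {
                example_dbg_exit();             /* unwind before bailing out */
                return ret;
        }
        return 0;                               /* later optional setup may follow */
}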
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d44b4d21268c..97e33669c0b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
         if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
                 return -EINVAL;
         if (vlan || qos) {
+                if (adapter->vfinfo[vf].pf_vlan)
+                        err = ixgbe_set_vf_vlan(adapter, false,
+                                                adapter->vfinfo[vf].pf_vlan,
+                                                vf);
+                if (err)
+                        goto out;
                 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
                 if (err)
                         goto out;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index edfba9370922..434e33c527df 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -33,6 +33,7 @@ config MV643XX_ETH

 config MVMDIO
         tristate "Marvell MDIO interface support"
+        select PHYLIB
         ---help---
           This driver supports the MDIO interface found in the network
           interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
@@ -45,7 +46,6 @@ config MVMDIO
 config MVNETA
         tristate "Marvell Armada 370/XP network interface support"
         depends on MACH_ARMADA_370_XP
-        select PHYLIB
         select MVMDIO
         ---help---
           This driver supports the network interface units in the
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index cd345b8969bc..a47a097c21e1 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -374,7 +374,6 @@ static int rxq_number = 8; | |||
374 | static int txq_number = 8; | 374 | static int txq_number = 8; |
375 | 375 | ||
376 | static int rxq_def; | 376 | static int rxq_def; |
377 | static int txq_def; | ||
378 | 377 | ||
379 | #define MVNETA_DRIVER_NAME "mvneta" | 378 | #define MVNETA_DRIVER_NAME "mvneta" |
380 | #define MVNETA_DRIVER_VERSION "1.0" | 379 | #define MVNETA_DRIVER_VERSION "1.0" |
@@ -1475,7 +1474,8 @@ error: | |||
1475 | static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) | 1474 | static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) |
1476 | { | 1475 | { |
1477 | struct mvneta_port *pp = netdev_priv(dev); | 1476 | struct mvneta_port *pp = netdev_priv(dev); |
1478 | struct mvneta_tx_queue *txq = &pp->txqs[txq_def]; | 1477 | u16 txq_id = skb_get_queue_mapping(skb); |
1478 | struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; | ||
1479 | struct mvneta_tx_desc *tx_desc; | 1479 | struct mvneta_tx_desc *tx_desc; |
1480 | struct netdev_queue *nq; | 1480 | struct netdev_queue *nq; |
1481 | int frags = 0; | 1481 | int frags = 0; |
@@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) | |||
1485 | goto out; | 1485 | goto out; |
1486 | 1486 | ||
1487 | frags = skb_shinfo(skb)->nr_frags + 1; | 1487 | frags = skb_shinfo(skb)->nr_frags + 1; |
1488 | nq = netdev_get_tx_queue(dev, txq_def); | 1488 | nq = netdev_get_tx_queue(dev, txq_id); |
1489 | 1489 | ||
1490 | /* Get a descriptor for the first part of the packet */ | 1490 | /* Get a descriptor for the first part of the packet */ |
1491 | tx_desc = mvneta_txq_next_desc_get(txq); | 1491 | tx_desc = mvneta_txq_next_desc_get(txq); |
@@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev) | |||
2689 | return -EINVAL; | 2689 | return -EINVAL; |
2690 | } | 2690 | } |
2691 | 2691 | ||
2692 | dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8); | 2692 | dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); |
2693 | if (!dev) | 2693 | if (!dev) |
2694 | return -ENOMEM; | 2694 | return -ENOMEM; |
2695 | 2695 | ||
@@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev) | |||
2771 | 2771 | ||
2772 | netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); | 2772 | netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); |
2773 | 2773 | ||
2774 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2775 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2776 | dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2777 | dev->priv_flags |= IFF_UNICAST_FLT; | ||
2778 | |||
2774 | err = register_netdev(dev); | 2779 | err = register_netdev(dev); |
2775 | if (err < 0) { | 2780 | if (err < 0) { |
2776 | dev_err(&pdev->dev, "failed to register\n"); | 2781 | dev_err(&pdev->dev, "failed to register\n"); |
2777 | goto err_deinit; | 2782 | goto err_deinit; |
2778 | } | 2783 | } |
2779 | 2784 | ||
2780 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2781 | dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2782 | dev->priv_flags |= IFF_UNICAST_FLT; | ||
2783 | |||
2784 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); | 2785 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); |
2785 | 2786 | ||
2786 | platform_set_drvdata(pdev, pp->dev); | 2787 | platform_set_drvdata(pdev, pp->dev); |
@@ -2843,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO); | |||
2843 | module_param(txq_number, int, S_IRUGO); | 2844 | module_param(txq_number, int, S_IRUGO); |
2844 | 2845 | ||
2845 | module_param(rxq_def, int, S_IRUGO); | 2846 | module_param(rxq_def, int, S_IRUGO); |
2846 | module_param(txq_def, int, S_IRUGO); | ||
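
The mvneta changes drop the txq_def module parameter in favour of the queue the networking core already selected for each skb, allocate the netdev with matching tx/rx queue counts, and set the feature flags before register_netdev() so user space never sees them change afterwards. A sketch of the multiqueue side, under the assumption of eight queues each way (my_port and my_xmit are illustrative names):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_TXQS	8
#define MY_RXQS	8

struct my_port { int dummy; };		/* placeholder private data */

/* Allocate a netdev that advertises as many queues as the hardware has. */
static struct net_device *my_alloc_netdev(void)
{
	return alloc_etherdev_mqs(sizeof(struct my_port), MY_TXQS, MY_RXQS);
}

/* On transmit, honour the queue the stack already picked for this skb. */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 txq_id = skb_get_queue_mapping(skb);
	struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

	/* ...fill the descriptor ring behind txq_id, then account on nq... */
	(void)nq;
	dev_kfree_skb_any(skb);	/* placeholder: a real driver frees on completion */
	return NETDEV_TX_OK;
}
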
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index fc07ca35721b..6a0e671fcecd 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) | |||
1067 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); | 1067 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); |
1068 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); | 1068 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); |
1069 | 1069 | ||
1070 | tp = space - 2048/8; | 1070 | tp = space - 8192/8; |
1071 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); | 1071 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); |
1072 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); | 1072 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); |
1073 | } else { | 1073 | } else { |
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index 615ac63ea860..ec6dcd80152b 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h | |||
@@ -2074,7 +2074,7 @@ enum { | |||
2074 | GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ | 2074 | GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ |
2075 | GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ | 2075 | GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ |
2076 | 2076 | ||
2077 | #define GMAC_DEF_MSK GM_IS_TX_FF_UR | 2077 | #define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR) |
2078 | }; | 2078 | }; |
2079 | 2079 | ||
2080 | /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ | 2080 | /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f278b10ef714..30d78f806dc3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
411 | 411 | ||
412 | static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) | 412 | static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) |
413 | { | 413 | { |
414 | unsigned int i; | 414 | int i; |
415 | for (i = ETH_ALEN - 1; i; --i) { | 415 | for (i = ETH_ALEN - 1; i >= 0; --i) { |
416 | dst_mac[i] = src_mac & 0xff; | 416 | dst_mac[i] = src_mac & 0xff; |
417 | src_mac >>= 8; | 417 | src_mac >>= 8; |
418 | } | 418 | } |
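
The mlx4 fix corrects the u64-to-MAC conversion: the old loop condition stopped before index 0, so the most significant address byte was never written, and the new `i >= 0` bound needs a signed counter, hence the switch from unsigned int to int. A standalone sketch of the corrected loop (plain C, not the driver function):

#include <stdint.h>
#include <stdio.h>

#define MY_ETH_ALEN 6

/* Write the low 48 bits of src into dst[0..5], most significant byte first. */
static void u64_to_mac(uint8_t dst[MY_ETH_ALEN], uint64_t src)
{
	int i;					/* signed, so i >= 0 can terminate */

	for (i = MY_ETH_ALEN - 1; i >= 0; --i) {	/* include index 0 */
		dst[i] = src & 0xff;
		src >>= 8;
	}
}

int main(void)
{
	uint8_t mac[MY_ETH_ALEN];

	u64_to_mac(mac, 0x001122334455ULL);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
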
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 33bcb63d56a2..8fb481252e2c 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c | |||
@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) | |||
528 | for (; rxfc != 0; rxfc--) { | 528 | for (; rxfc != 0; rxfc--) { |
529 | rxh = ks8851_rdreg32(ks, KS_RXFHSR); | 529 | rxh = ks8851_rdreg32(ks, KS_RXFHSR); |
530 | rxstat = rxh & 0xffff; | 530 | rxstat = rxh & 0xffff; |
531 | rxlen = rxh >> 16; | 531 | rxlen = (rxh >> 16) & 0xfff; |
532 | 532 | ||
533 | netif_dbg(ks, rx_status, ks->netdev, | 533 | netif_dbg(ks, rx_status, ks->netdev, |
534 | "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); | 534 | "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); |
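
In the ks8851 hunk, the receive frame-header word is no longer taken wholesale as a length: the status sits in bits 15:0 and only a 12-bit byte count is valid in the upper half, so the shifted value is masked with 0xfff. A small standalone illustration of that decode (field layout as used by the hunk; verify against the datasheet before reuse):

#include <stdint.h>
#include <stdio.h>

static void decode_rx_header(uint32_t rxh, uint16_t *rxstat, uint16_t *rxlen)
{
	*rxstat = rxh & 0xffff;		/* status bits 15:0 */
	*rxlen = (rxh >> 16) & 0xfff;	/* 12-bit byte count; reserved bits dropped */
}

int main(void)
{
	uint16_t stat, len;

	decode_rx_header(0xf05a8001u, &stat, &len);	/* reserved upper bits set */
	printf("stat 0x%04x, len 0x%04x\n", stat, len);	/* len 0x005a, not 0xf05a */
	return 0;
}
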
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index cd5ae8813cb3..edd63f1230f3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) | |||
1500 | } | 1500 | } |
1501 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); | 1501 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); |
1502 | 1502 | ||
1503 | /* Make sure carrier is off and queue is stopped during loopback */ | ||
1504 | if (netif_running(netdev)) { | ||
1505 | netif_carrier_off(netdev); | ||
1506 | netif_stop_queue(netdev); | ||
1507 | } | ||
1508 | |||
1503 | ret = qlcnic_do_lb_test(adapter, mode); | 1509 | ret = qlcnic_do_lb_test(adapter, mode); |
1504 | 1510 | ||
1505 | qlcnic_83xx_clear_lb_mode(adapter, mode); | 1511 | qlcnic_83xx_clear_lb_mode(adapter, mode); |
@@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter, | |||
2780 | void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | 2786 | void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) |
2781 | { | 2787 | { |
2782 | struct qlcnic_cmd_args cmd; | 2788 | struct qlcnic_cmd_args cmd; |
2789 | struct net_device *netdev = adapter->netdev; | ||
2783 | int ret = 0; | 2790 | int ret = 0; |
2784 | 2791 | ||
2785 | qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); | 2792 | qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); |
@@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | |||
2789 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, | 2796 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, |
2790 | QLC_83XX_STAT_TX, &ret); | 2797 | QLC_83XX_STAT_TX, &ret); |
2791 | if (ret) { | 2798 | if (ret) { |
2792 | dev_info(&adapter->pdev->dev, "Error getting MAC stats\n"); | 2799 | netdev_err(netdev, "Error getting Tx stats\n"); |
2793 | goto out; | 2800 | goto out; |
2794 | } | 2801 | } |
2795 | /* Get MAC stats */ | 2802 | /* Get MAC stats */ |
@@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | |||
2799 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, | 2806 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, |
2800 | QLC_83XX_STAT_MAC, &ret); | 2807 | QLC_83XX_STAT_MAC, &ret); |
2801 | if (ret) { | 2808 | if (ret) { |
2802 | dev_info(&adapter->pdev->dev, | 2809 | netdev_err(netdev, "Error getting MAC stats\n"); |
2803 | "Error getting Rx stats\n"); | ||
2804 | goto out; | 2810 | goto out; |
2805 | } | 2811 | } |
2806 | /* Get Rx stats */ | 2812 | /* Get Rx stats */ |
@@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | |||
2810 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, | 2816 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, |
2811 | QLC_83XX_STAT_RX, &ret); | 2817 | QLC_83XX_STAT_RX, &ret); |
2812 | if (ret) | 2818 | if (ret) |
2813 | dev_info(&adapter->pdev->dev, | 2819 | netdev_err(netdev, "Error getting Rx stats\n"); |
2814 | "Error getting Tx stats\n"); | ||
2815 | out: | 2820 | out: |
2816 | qlcnic_free_mbx_args(&cmd); | 2821 | qlcnic_free_mbx_args(&cmd); |
2817 | } | 2822 | } |
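
Two things change in the qlcnic 83xx hunks: the loopback test now forces the carrier off and stops the tx queue while it runs, and the stats error messages are relabelled (Tx/MAC/Rx were previously mismatched) and promoted to netdev_err(). A rough sketch of the quiesce pattern only; run_lb_test() is hypothetical, and the restore step shown here is an assumption (the real driver restores state in its clear-loopback path):

#include <linux/netdevice.h>

static int run_lb_test(struct net_device *netdev)
{
	return 0;			/* stand-in for the actual test body */
}

static int my_loopback_test(struct net_device *netdev)
{
	bool was_running = netif_running(netdev);
	int ret;

	if (was_running) {
		netif_carrier_off(netdev);	/* hide the link from the stack */
		netif_stop_queue(netdev);	/* keep normal tx out of the test */
	}

	ret = run_lb_test(netdev);

	if (was_running) {			/* assumption: restore here */
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	}
	return ret;
}
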
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0e630061bff3..5fa847fe388a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -358,8 +358,7 @@ set_flags: | |||
358 | memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); | 358 | memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); |
359 | } | 359 | } |
360 | opcode = TX_ETHER_PKT; | 360 | opcode = TX_ETHER_PKT; |
361 | if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && | 361 | if (skb_is_gso(skb)) { |
362 | skb_shinfo(skb)->gso_size > 0) { | ||
363 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 362 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
364 | first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | 363 | first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
365 | first_desc->total_hdr_length = hdr_len; | 364 | first_desc->total_hdr_length = hdr_len; |
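
The qlcnic_io hunk swaps a manual test of the netdev TSO feature bits plus gso_size for skb_is_gso(), which simply checks that gso_size is non-zero; once the stack hands the driver a GSO skb, re-checking the feature flags adds nothing. A sketch of how the LSO header length would then be derived (my_lso_hdr_len is an illustrative helper):

#include <linux/skbuff.h>
#include <linux/tcp.h>

static unsigned int my_lso_hdr_len(const struct sk_buff *skb)
{
	if (!skb_is_gso(skb))
		return 0;		/* not a TSO frame, no LSO header needed */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}
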
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 987fb6f8adc3..5ef328af61d0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
@@ -200,10 +200,10 @@ beacon_err: | |||
200 | } | 200 | } |
201 | 201 | ||
202 | err = qlcnic_config_led(adapter, b_state, b_rate); | 202 | err = qlcnic_config_led(adapter, b_state, b_rate); |
203 | if (!err) | 203 | if (!err) { |
204 | err = len; | 204 | err = len; |
205 | else | ||
206 | ahw->beacon_state = b_state; | 205 | ahw->beacon_state = b_state; |
206 | } | ||
207 | 207 | ||
208 | if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) | 208 | if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) |
209 | qlcnic_diag_free_res(adapter->netdev, max_sds_rings); | 209 | qlcnic_diag_free_res(adapter->netdev, max_sds_rings); |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index a131d7b5d2fe..7e8d68263963 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #define DRV_NAME "qlge" | 19 | #define DRV_NAME "qlge" |
20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " | 20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " |
21 | #define DRV_VERSION "v1.00.00.31" | 21 | #define DRV_VERSION "v1.00.00.32" |
22 | 22 | ||
23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ | 23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ |
24 | 24 | ||
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 6f316ab23257..0780e039b271 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | |||
@@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev, | |||
379 | 379 | ||
380 | ecmd->supported = SUPPORTED_10000baseT_Full; | 380 | ecmd->supported = SUPPORTED_10000baseT_Full; |
381 | ecmd->advertising = ADVERTISED_10000baseT_Full; | 381 | ecmd->advertising = ADVERTISED_10000baseT_Full; |
382 | ecmd->autoneg = AUTONEG_ENABLE; | ||
383 | ecmd->transceiver = XCVR_EXTERNAL; | 382 | ecmd->transceiver = XCVR_EXTERNAL; |
384 | if ((qdev->link_status & STS_LINK_TYPE_MASK) == | 383 | if ((qdev->link_status & STS_LINK_TYPE_MASK) == |
385 | STS_LINK_TYPE_10GBASET) { | 384 | STS_LINK_TYPE_10GBASET) { |
386 | ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); | 385 | ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); |
387 | ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); | 386 | ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); |
388 | ecmd->port = PORT_TP; | 387 | ecmd->port = PORT_TP; |
388 | ecmd->autoneg = AUTONEG_ENABLE; | ||
389 | } else { | 389 | } else { |
390 | ecmd->supported |= SUPPORTED_FIBRE; | 390 | ecmd->supported |= SUPPORTED_FIBRE; |
391 | ecmd->advertising |= ADVERTISED_FIBRE; | 391 | ecmd->advertising |= ADVERTISED_FIBRE; |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b13ab544a7eb..8033555e53c2 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -1434,11 +1434,13 @@ map_error: | |||
1434 | } | 1434 | } |
1435 | 1435 | ||
1436 | /* Categorizing receive firmware frame errors */ | 1436 | /* Categorizing receive firmware frame errors */ |
1437 | static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err) | 1437 | static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, |
1438 | struct rx_ring *rx_ring) | ||
1438 | { | 1439 | { |
1439 | struct nic_stats *stats = &qdev->nic_stats; | 1440 | struct nic_stats *stats = &qdev->nic_stats; |
1440 | 1441 | ||
1441 | stats->rx_err_count++; | 1442 | stats->rx_err_count++; |
1443 | rx_ring->rx_errors++; | ||
1442 | 1444 | ||
1443 | switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { | 1445 | switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { |
1444 | case IB_MAC_IOCB_RSP_ERR_CODE_ERR: | 1446 | case IB_MAC_IOCB_RSP_ERR_CODE_ERR: |
@@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, | |||
1474 | struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); | 1476 | struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); |
1475 | struct napi_struct *napi = &rx_ring->napi; | 1477 | struct napi_struct *napi = &rx_ring->napi; |
1476 | 1478 | ||
1479 | /* Frame error, so drop the packet. */ | ||
1480 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1481 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1482 | put_page(lbq_desc->p.pg_chunk.page); | ||
1483 | return; | ||
1484 | } | ||
1477 | napi->dev = qdev->ndev; | 1485 | napi->dev = qdev->ndev; |
1478 | 1486 | ||
1479 | skb = napi_get_frags(napi); | 1487 | skb = napi_get_frags(napi); |
@@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, | |||
1529 | addr = lbq_desc->p.pg_chunk.va; | 1537 | addr = lbq_desc->p.pg_chunk.va; |
1530 | prefetch(addr); | 1538 | prefetch(addr); |
1531 | 1539 | ||
1540 | /* Frame error, so drop the packet. */ | ||
1541 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1542 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1543 | goto err_out; | ||
1544 | } | ||
1545 | |||
1532 | /* The max framesize filter on this chip is set higher than | 1546 | /* The max framesize filter on this chip is set higher than |
1533 | * MTU since FCoE uses 2k frames. | 1547 | * MTU since FCoE uses 2k frames. |
1534 | */ | 1548 | */ |
@@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, | |||
1614 | memcpy(skb_put(new_skb, length), skb->data, length); | 1628 | memcpy(skb_put(new_skb, length), skb->data, length); |
1615 | skb = new_skb; | 1629 | skb = new_skb; |
1616 | 1630 | ||
1631 | /* Frame error, so drop the packet. */ | ||
1632 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1633 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1634 | dev_kfree_skb_any(skb); | ||
1635 | return; | ||
1636 | } | ||
1637 | |||
1617 | /* loopback self test for ethtool */ | 1638 | /* loopback self test for ethtool */ |
1618 | if (test_bit(QL_SELFTEST, &qdev->flags)) { | 1639 | if (test_bit(QL_SELFTEST, &qdev->flags)) { |
1619 | ql_check_lb_frame(qdev, skb); | 1640 | ql_check_lb_frame(qdev, skb); |
@@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, | |||
1919 | return; | 1940 | return; |
1920 | } | 1941 | } |
1921 | 1942 | ||
1943 | /* Frame error, so drop the packet. */ | ||
1944 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1945 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1946 | dev_kfree_skb_any(skb); | ||
1947 | return; | ||
1948 | } | ||
1949 | |||
1922 | /* The max framesize filter on this chip is set higher than | 1950 | /* The max framesize filter on this chip is set higher than |
1923 | * MTU since FCoE uses 2k frames. | 1951 | * MTU since FCoE uses 2k frames. |
1924 | */ | 1952 | */ |
@@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, | |||
2000 | 2028 | ||
2001 | QL_DUMP_IB_MAC_RSP(ib_mac_rsp); | 2029 | QL_DUMP_IB_MAC_RSP(ib_mac_rsp); |
2002 | 2030 | ||
2003 | /* Frame error, so drop the packet. */ | ||
2004 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
2005 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2); | ||
2006 | return (unsigned long)length; | ||
2007 | } | ||
2008 | |||
2009 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { | 2031 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { |
2010 | /* The data and headers are split into | 2032 | /* The data and headers are split into |
2011 | * separate buffers. | 2033 | * separate buffers. |
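
The qlge changes move the frame-error check out of the dispatcher and into each receive path, because each path holds a different resource when the error is detected (a page chunk, a copied skb, a regular skb) and must release it itself; the helper also gains the rx_ring so the per-ring rx_errors counter is bumped. The common shape, with illustrative names:

#include <linux/types.h>

#define MY_RSP_ERR_MASK	0x3e		/* placeholder for the error-bit mask */

struct my_rx_ring {
	unsigned long rx_errors;	/* feeds the netdev rx_errors statistic */
};

/* Returns true (after counting) when the completion carries an error. */
static bool my_rx_frame_bad(struct my_rx_ring *ring, u8 err_flags)
{
	if (!(err_flags & MY_RSP_ERR_MASK))
		return false;
	ring->rx_errors++;
	return true;
}

A page-based path would then put_page() its chunk and return, while the skb paths call dev_kfree_skb_any() before bailing out, which is the variation visible across the hunks above.
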
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 28fb50a1e9c3..4ecbe64a758d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -3818,6 +3818,30 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) | |||
3818 | } | 3818 | } |
3819 | } | 3819 | } |
3820 | 3820 | ||
3821 | static void rtl_speed_down(struct rtl8169_private *tp) | ||
3822 | { | ||
3823 | u32 adv; | ||
3824 | int lpa; | ||
3825 | |||
3826 | rtl_writephy(tp, 0x1f, 0x0000); | ||
3827 | lpa = rtl_readphy(tp, MII_LPA); | ||
3828 | |||
3829 | if (lpa & (LPA_10HALF | LPA_10FULL)) | ||
3830 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; | ||
3831 | else if (lpa & (LPA_100HALF | LPA_100FULL)) | ||
3832 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | ||
3833 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; | ||
3834 | else | ||
3835 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | ||
3836 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | | ||
3837 | (tp->mii.supports_gmii ? | ||
3838 | ADVERTISED_1000baseT_Half | | ||
3839 | ADVERTISED_1000baseT_Full : 0); | ||
3840 | |||
3841 | rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, | ||
3842 | adv); | ||
3843 | } | ||
3844 | |||
3821 | static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) | 3845 | static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) |
3822 | { | 3846 | { |
3823 | void __iomem *ioaddr = tp->mmio_addr; | 3847 | void __iomem *ioaddr = tp->mmio_addr; |
@@ -3848,9 +3872,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) | |||
3848 | if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) | 3872 | if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) |
3849 | return false; | 3873 | return false; |
3850 | 3874 | ||
3851 | rtl_writephy(tp, 0x1f, 0x0000); | 3875 | rtl_speed_down(tp); |
3852 | rtl_writephy(tp, MII_BMCR, 0x0000); | ||
3853 | |||
3854 | rtl_wol_suspend_quirk(tp); | 3876 | rtl_wol_suspend_quirk(tp); |
3855 | 3877 | ||
3856 | return true; | 3878 | return true; |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index bf5e3cf97c4d..6ed333fe5c04 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
1216 | if (felic_stat & ECSR_LCHNG) { | 1216 | if (felic_stat & ECSR_LCHNG) { |
1217 | /* Link Changed */ | 1217 | /* Link Changed */ |
1218 | if (mdp->cd->no_psr || mdp->no_ether_link) { | 1218 | if (mdp->cd->no_psr || mdp->no_ether_link) { |
1219 | if (mdp->link == PHY_DOWN) | 1219 | goto ignore_link; |
1220 | link_stat = 0; | ||
1221 | else | ||
1222 | link_stat = PHY_ST_LINK; | ||
1223 | } else { | 1220 | } else { |
1224 | link_stat = (sh_eth_read(ndev, PSR)); | 1221 | link_stat = (sh_eth_read(ndev, PSR)); |
1225 | if (mdp->ether_link_active_low) | 1222 | if (mdp->ether_link_active_low) |
@@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
1242 | } | 1239 | } |
1243 | } | 1240 | } |
1244 | 1241 | ||
1242 | ignore_link: | ||
1245 | if (intr_status & EESR_TWB) { | 1243 | if (intr_status & EESR_TWB) { |
1246 | /* Write buck end. unused write back interrupt */ | 1244 | /* Write buck end. unused write back interrupt */ |
1247 | if (intr_status & EESR_TABT) /* Transmit Abort int */ | 1245 | if (intr_status & EESR_TABT) /* Transmit Abort int */ |
@@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
1326 | struct sh_eth_private *mdp = netdev_priv(ndev); | 1324 | struct sh_eth_private *mdp = netdev_priv(ndev); |
1327 | struct sh_eth_cpu_data *cd = mdp->cd; | 1325 | struct sh_eth_cpu_data *cd = mdp->cd; |
1328 | irqreturn_t ret = IRQ_NONE; | 1326 | irqreturn_t ret = IRQ_NONE; |
1329 | u32 intr_status = 0; | 1327 | unsigned long intr_status; |
1330 | 1328 | ||
1331 | spin_lock(&mdp->lock); | 1329 | spin_lock(&mdp->lock); |
1332 | 1330 | ||
1333 | /* Get interrpt stat */ | 1331 | /* Get interrupt status */ |
1334 | intr_status = sh_eth_read(ndev, EESR); | 1332 | intr_status = sh_eth_read(ndev, EESR); |
1333 | /* Mask it with the interrupt mask, forcing ECI interrupt to be always | ||
1334 | * enabled since it's the one that comes thru regardless of the mask, | ||
1335 | * and we need to fully handle it in sh_eth_error() in order to quench | ||
1336 | * it as it doesn't get cleared by just writing 1 to the ECI bit... | ||
1337 | */ | ||
1338 | intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; | ||
1335 | /* Clear interrupt */ | 1339 | /* Clear interrupt */ |
1336 | if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | | 1340 | if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | |
1337 | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | | 1341 | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | |
@@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
1373 | struct phy_device *phydev = mdp->phydev; | 1377 | struct phy_device *phydev = mdp->phydev; |
1374 | int new_state = 0; | 1378 | int new_state = 0; |
1375 | 1379 | ||
1376 | if (phydev->link != PHY_DOWN) { | 1380 | if (phydev->link) { |
1377 | if (phydev->duplex != mdp->duplex) { | 1381 | if (phydev->duplex != mdp->duplex) { |
1378 | new_state = 1; | 1382 | new_state = 1; |
1379 | mdp->duplex = phydev->duplex; | 1383 | mdp->duplex = phydev->duplex; |
@@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
1387 | if (mdp->cd->set_rate) | 1391 | if (mdp->cd->set_rate) |
1388 | mdp->cd->set_rate(ndev); | 1392 | mdp->cd->set_rate(ndev); |
1389 | } | 1393 | } |
1390 | if (mdp->link == PHY_DOWN) { | 1394 | if (!mdp->link) { |
1391 | sh_eth_write(ndev, | 1395 | sh_eth_write(ndev, |
1392 | (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); | 1396 | (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); |
1393 | new_state = 1; | 1397 | new_state = 1; |
1394 | mdp->link = phydev->link; | 1398 | mdp->link = phydev->link; |
1399 | if (mdp->cd->no_psr || mdp->no_ether_link) | ||
1400 | sh_eth_rcv_snd_enable(ndev); | ||
1395 | } | 1401 | } |
1396 | } else if (mdp->link) { | 1402 | } else if (mdp->link) { |
1397 | new_state = 1; | 1403 | new_state = 1; |
1398 | mdp->link = PHY_DOWN; | 1404 | mdp->link = 0; |
1399 | mdp->speed = 0; | 1405 | mdp->speed = 0; |
1400 | mdp->duplex = -1; | 1406 | mdp->duplex = -1; |
1407 | if (mdp->cd->no_psr || mdp->no_ether_link) | ||
1408 | sh_eth_rcv_snd_disable(ndev); | ||
1401 | } | 1409 | } |
1402 | 1410 | ||
1403 | if (new_state && netif_msg_link(mdp)) | 1411 | if (new_state && netif_msg_link(mdp)) |
@@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev) | |||
1414 | snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, | 1422 | snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, |
1415 | mdp->mii_bus->id , mdp->phy_id); | 1423 | mdp->mii_bus->id , mdp->phy_id); |
1416 | 1424 | ||
1417 | mdp->link = PHY_DOWN; | 1425 | mdp->link = 0; |
1418 | mdp->speed = 0; | 1426 | mdp->speed = 0; |
1419 | mdp->duplex = -1; | 1427 | mdp->duplex = -1; |
1420 | 1428 | ||
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index e6655678458e..828be4515008 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -723,7 +723,7 @@ struct sh_eth_private { | |||
723 | u32 phy_id; /* PHY ID */ | 723 | u32 phy_id; /* PHY ID */ |
724 | struct mii_bus *mii_bus; /* MDIO bus control */ | 724 | struct mii_bus *mii_bus; /* MDIO bus control */ |
725 | struct phy_device *phydev; /* PHY device control */ | 725 | struct phy_device *phydev; /* PHY device control */ |
726 | enum phy_state link; | 726 | int link; |
727 | phy_interface_t phy_interface; | 727 | phy_interface_t phy_interface; |
728 | int msg_enable; | 728 | int msg_enable; |
729 | int speed; | 729 | int speed; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 0c74a702d461..50617c5a0bdb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c | |||
@@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) | |||
149 | { | 149 | { |
150 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); | 150 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); |
151 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); | 151 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); |
152 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK); | ||
152 | } | 153 | } |
153 | 154 | ||
154 | /* This reads the MAC core counters (if actaully supported). | 155 | /* This reads the MAC core counters (if actaully supported). |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index df32a090d08e..4781d3d8e182 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status) | |||
436 | * queue is stopped then start the queue as we have free desc for tx | 436 | * queue is stopped then start the queue as we have free desc for tx |
437 | */ | 437 | */ |
438 | if (unlikely(netif_queue_stopped(ndev))) | 438 | if (unlikely(netif_queue_stopped(ndev))) |
439 | netif_start_queue(ndev); | 439 | netif_wake_queue(ndev); |
440 | cpts_tx_timestamp(priv->cpts, skb); | 440 | cpts_tx_timestamp(priv->cpts, skb); |
441 | priv->stats.tx_packets++; | 441 | priv->stats.tx_packets++; |
442 | priv->stats.tx_bytes += len; | 442 | priv->stats.tx_bytes += len; |
@@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
1380 | memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); | 1380 | memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); |
1381 | 1381 | ||
1382 | if (data->dual_emac) { | 1382 | if (data->dual_emac) { |
1383 | if (of_property_read_u32(node, "dual_emac_res_vlan", | 1383 | if (of_property_read_u32(slave_node, "dual_emac_res_vlan", |
1384 | &prop)) { | 1384 | &prop)) { |
1385 | pr_err("Missing dual_emac_res_vlan in DT.\n"); | 1385 | pr_err("Missing dual_emac_res_vlan in DT.\n"); |
1386 | slave_data->dual_emac_res_vlan = i+1; | 1386 | slave_data->dual_emac_res_vlan = i+1; |
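
Both the cpsw hunk above and the davinci_emac hunk just below switch the tx-completion path from netif_start_queue() to netif_wake_queue(): starting only clears the stopped bit, while waking also reschedules the tx softirq, so frames that piled up while the ring was full get pushed out without waiting for the next transmit attempt. A minimal completion-handler sketch (my_tx_complete is illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_tx_complete(struct net_device *ndev, struct sk_buff *skb, int len)
{
	if (unlikely(netif_queue_stopped(ndev)))
		netif_wake_queue(ndev);	/* restart *and* kick the tx softirq */

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}
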
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index ae1b77aa199f..72300bc9e378 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status) | |||
1053 | * queue is stopped then start the queue as we have free desc for tx | 1053 | * queue is stopped then start the queue as we have free desc for tx |
1054 | */ | 1054 | */ |
1055 | if (unlikely(netif_queue_stopped(ndev))) | 1055 | if (unlikely(netif_queue_stopped(ndev))) |
1056 | netif_start_queue(ndev); | 1056 | netif_wake_queue(ndev); |
1057 | ndev->stats.tx_packets++; | 1057 | ndev->stats.tx_packets++; |
1058 | ndev->stats.tx_bytes += len; | 1058 | ndev->stats.tx_bytes += len; |
1059 | dev_kfree_skb_any(skb); | 1059 | dev_kfree_skb_any(skb); |