Diffstat (limited to 'drivers/net')

-rw-r--r--  drivers/net/e100.c                30
-rw-r--r--  drivers/net/forcedeth.c           31
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c  51
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c    10
-rw-r--r--  drivers/net/mlx4/en_netdev.c       2
-rw-r--r--  drivers/net/mlx4/en_rx.c           4
-rw-r--r--  drivers/net/veth.c                41

7 files changed, 71 insertions, 98 deletions
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 5c0b457c7868..0f9ee1348552 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2728,7 +2728,7 @@ static void __devexit e100_remove(struct pci_dev *pdev)
 #define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
 #define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
 #define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */
-static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
+static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
         struct net_device *netdev = pci_get_drvdata(pdev);
         struct nic *nic = netdev_priv(netdev);
@@ -2749,19 +2749,32 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
                                     E100_82552_SMARTSPEED, smartspeed |
                                     E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
                 }
-                if (pci_enable_wake(pdev, PCI_D3cold, true))
-                        pci_enable_wake(pdev, PCI_D3hot, true);
+                *enable_wake = true;
         } else {
-                pci_enable_wake(pdev, PCI_D3hot, false);
+                *enable_wake = false;
         }
 
         pci_disable_device(pdev);
-        pci_set_power_state(pdev, PCI_D3hot);
+}
 
-        return 0;
+static int __e100_power_off(struct pci_dev *pdev, bool wake)
+{
+        if (wake) {
+                return pci_prepare_to_sleep(pdev);
+        } else {
+                pci_wake_from_d3(pdev, false);
+                return pci_set_power_state(pdev, PCI_D3hot);
+        }
 }
 
 #ifdef CONFIG_PM
+static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+        bool wake;
+        __e100_shutdown(pdev, &wake);
+        return __e100_power_off(pdev, wake);
+}
+
 static int e100_resume(struct pci_dev *pdev)
 {
         struct net_device *netdev = pci_get_drvdata(pdev);
@@ -2792,7 +2805,10 @@ static int e100_resume(struct pci_dev *pdev)
 
 static void e100_shutdown(struct pci_dev *pdev)
 {
-        e100_suspend(pdev, PMSG_SUSPEND);
+        bool wake;
+        __e100_shutdown(pdev, &wake);
+        if (system_state == SYSTEM_POWER_OFF)
+                __e100_power_off(pdev, wake);
 }
 
 /* ------------------ PCI Error Recovery infrastructure -------------- */
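
The e100 hunks above split the old suspend routine in two: __e100_shutdown() quiesces the NIC and reports whether wake-up should stay armed, and __e100_power_off() picks the low-power state, so e100_shutdown() only drops the device into D3 when the machine is really powering off (not on a reboot or kexec). A minimal sketch of that wiring for a hypothetical PCI driver follows; the foo_* names and the wake-decision policy are illustrative, not taken from the patch:

#include <linux/kernel.h>       /* system_state, SYSTEM_POWER_OFF */
#include <linux/pci.h>

/* Quiesce the device and report whether wake-up should stay armed. */
static void __foo_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
        /* ... stop DMA, detach the netdev, save config space ... */
        *enable_wake = device_may_wakeup(&pdev->dev);   /* illustrative policy */
        pci_disable_device(pdev);
}

/* Pick the low-power state; only called when we really power down. */
static int __foo_power_off(struct pci_dev *pdev, bool wake)
{
        if (wake)
                return pci_prepare_to_sleep(pdev);

        pci_wake_from_d3(pdev, false);
        return pci_set_power_state(pdev, PCI_D3hot);
}

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
        bool wake;

        __foo_shutdown(pdev, &wake);
        return __foo_power_off(pdev, wake);
}

static void foo_shutdown(struct pci_dev *pdev)
{
        bool wake;

        __foo_shutdown(pdev, &wake);
        /* A reboot or kexec keeps the device in D0; only a halt powers it off. */
        if (system_state == SYSTEM_POWER_OFF)
                __foo_power_off(pdev, wake);
}
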
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 11d5db16ed9c..f9a846b1b92f 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1880,6 +1880,7 @@ static void nv_init_tx(struct net_device *dev)
         np->tx_pkts_in_progress = 0;
         np->tx_change_owner = NULL;
         np->tx_end_flip = NULL;
+        np->tx_stop = 0;
 
         for (i = 0; i < np->tx_ring_size; i++) {
                 if (!nv_optimized(np)) {
@@ -2530,6 +2531,8 @@ static void nv_tx_timeout(struct net_device *dev)
         struct fe_priv *np = netdev_priv(dev);
         u8 __iomem *base = get_hwbase(dev);
         u32 status;
+        union ring_type put_tx;
+        int saved_tx_limit;
 
         if (np->msi_flags & NV_MSI_X_ENABLED)
                 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
@@ -2589,24 +2592,32 @@ static void nv_tx_timeout(struct net_device *dev)
         /* 1) stop tx engine */
         nv_stop_tx(dev);
 
-        /* 2) check that the packets were not sent already: */
+        /* 2) complete any outstanding tx and do not give HW any limited tx pkts */
+        saved_tx_limit = np->tx_limit;
+        np->tx_limit = 0; /* prevent giving HW any limited pkts */
+        np->tx_stop = 0;  /* prevent waking tx queue */
         if (!nv_optimized(np))
                 nv_tx_done(dev, np->tx_ring_size);
         else
                 nv_tx_done_optimized(dev, np->tx_ring_size);
 
-        /* 3) if there are dead entries: clear everything */
-        if (np->get_tx_ctx != np->put_tx_ctx) {
-                printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
-                nv_drain_tx(dev);
-                nv_init_tx(dev);
-                setup_hw_rings(dev, NV_SETUP_TX_RING);
-        }
+        /* save current HW postion */
+        if (np->tx_change_owner)
+                put_tx.ex = np->tx_change_owner->first_tx_desc;
+        else
+                put_tx = np->put_tx;
 
-        netif_wake_queue(dev);
+        /* 3) clear all tx state */
+        nv_drain_tx(dev);
+        nv_init_tx(dev);
+
+        /* 4) restore state to current HW position */
+        np->get_tx = np->put_tx = put_tx;
+        np->tx_limit = saved_tx_limit;
 
-        /* 4) restart tx engine */
+        /* 5) restart tx engine */
         nv_start_tx(dev);
+        netif_wake_queue(dev);
         spin_unlock_irq(&np->lock);
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 5567519676d5..186a65069b33 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -50,7 +50,6 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
 static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
 static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
 static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
 
 /**
@@ -1377,8 +1376,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
          * Clear accounting of old secondary address list,
          * don't count RAR[0]
          */
-        uc_addr_in_use = hw->addr_ctrl.rar_used_count -
-                         hw->addr_ctrl.mc_addr_in_rar_count - 1;
+        uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
         hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
         hw->addr_ctrl.overflow_promisc = 0;
 
@@ -1493,40 +1491,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
 }
 
 /**
- *  ixgbe_add_mc_addr - Adds a multicast address.
- *  @hw: pointer to hardware structure
- *  @mc_addr: new multicast address
- *
- *  Adds it to unused receive address register or to the multicast table.
- **/
-static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
-{
-        u32 rar_entries = hw->mac.num_rar_entries;
-        u32 rar;
-
-        hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
-               mc_addr[0], mc_addr[1], mc_addr[2],
-               mc_addr[3], mc_addr[4], mc_addr[5]);
-
-        /*
-         * Place this multicast address in the RAR if there is room,
-         * else put it in the MTA
-         */
-        if (hw->addr_ctrl.rar_used_count < rar_entries) {
-                /* use RAR from the end up for multicast */
-                rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
-                hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
-                hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar);
-                hw->addr_ctrl.rar_used_count++;
-                hw->addr_ctrl.mc_addr_in_rar_count++;
-        } else {
-                ixgbe_set_mta(hw, mc_addr);
-        }
-
-        hw_dbg(hw, "ixgbe_add_mc_addr Complete\n");
-}
-
-/**
  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
  *  @hw: pointer to hardware structure
  *  @mc_addr_list: the list of new multicast addresses
@@ -1542,7 +1506,6 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
                                       u32 mc_addr_count, ixgbe_mc_addr_itr next)
 {
         u32 i;
-        u32 rar_entries = hw->mac.num_rar_entries;
         u32 vmdq;
 
         /*
@@ -1550,18 +1513,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
          * use.
          */
         hw->addr_ctrl.num_mc_addrs = mc_addr_count;
-        hw->addr_ctrl.rar_used_count -= hw->addr_ctrl.mc_addr_in_rar_count;
-        hw->addr_ctrl.mc_addr_in_rar_count = 0;
         hw->addr_ctrl.mta_in_use = 0;
 
-        /* Zero out the other receive addresses. */
-        hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
-               rar_entries - 1);
-        for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
-                IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
-                IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
-        }
-
         /* Clear the MTA */
         hw_dbg(hw, " Clearing MTA\n");
         for (i = 0; i < hw->mac.mcft_size; i++)
@@ -1570,7 +1523,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
         /* Add the new addresses */
         for (i = 0; i < mc_addr_count; i++) {
                 hw_dbg(hw, " Adding the multicast addresses:\n");
-                ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq));
+                ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
         }
 
         /* Enable mta */
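
With ixgbe_add_mc_addr() removed, multicast addresses no longer spill into spare receive address registers (RARs); every address is hashed into the 4096-bit multicast table array (MTA) by ixgbe_set_mta(), and the RAR accounting for multicast goes away with it. The sketch below shows the general shape of such a hash-filter update; the bit selection resembles one ixgbe filter mode but is illustrative, not a copy of ixgbe_mta_vector():

#include <stdint.h>
#include <string.h>

#define MTA_WORDS 128   /* 128 x 32-bit words = 4096 filter bits */

static uint32_t mta_shadow[MTA_WORDS];

/* Derive a 12-bit table index from the multicast MAC address (illustrative;
 * real hardware selects different address bits per filter-type setting). */
static unsigned int mta_vector(const uint8_t *mc_addr)
{
        return ((mc_addr[4] >> 4) | ((unsigned int)mc_addr[5] << 4)) & 0xfff;
}

static void set_mta(const uint8_t *mc_addr)
{
        unsigned int vector = mta_vector(mc_addr);

        mta_shadow[vector >> 5] |= 1u << (vector & 0x1f);
}

/* Rebuild the table from scratch for a new address list, as the updated
 * ixgbe_update_mc_addr_list_generic() now does for every address. */
static void update_mc_list(const uint8_t (*list)[6], unsigned int count)
{
        unsigned int i;

        memset(mta_shadow, 0, sizeof(mta_shadow));
        for (i = 0; i < count; i++)
                set_mta(list[i]);
        /* ... then write mta_shadow[] out to the MTA registers ... */
}

The hardware accepts any multicast frame whose hashed bit is set; anything that slips through the coarse hash is filtered out by the stack.
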
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 01884256f4c9..07e778d3e5d2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3646,6 +3646,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
         ixgbe_reset(adapter);
 
+        IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+
         if (netif_running(netdev)) {
                 err = ixgbe_open(adapter->netdev);
                 if (err)
@@ -4575,7 +4577,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
         const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
         static int cards_found;
         int i, err, pci_using_dac;
-        u16 pm_value = 0;
         u32 part_num, eec;
 
         err = pci_enable_device(pdev);
@@ -4763,11 +4764,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
         switch (pdev->device) {
         case IXGBE_DEV_ID_82599_KX4:
-#define IXGBE_PCIE_PMCSR 0x44
-                adapter->wol = IXGBE_WUFC_MAG;
-                pci_read_config_word(pdev, IXGBE_PCIE_PMCSR, &pm_value);
-                pci_write_config_word(pdev, IXGBE_PCIE_PMCSR,
-                                      (pm_value | (1 << 8)));
+                adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
+                                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
                 break;
         default:
                 adapter->wol = 0;
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 438678ab2a10..7bcc49de1637 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -583,7 +583,7 @@ int mlx4_en_start_port(struct net_device *dev)
                 err = mlx4_en_activate_cq(priv, cq);
                 if (err) {
                         mlx4_err(mdev, "Failed activating Rx CQ\n");
-                        goto rx_err;
+                        goto cq_err;
                 }
                 for (j = 0; j < cq->size; j++)
                         cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
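
This one-line change routes the CQ-activation failure to the label that unwinds only what had been set up before that point, which is the usual kernel goto-unwind idiom: each label undoes one earlier step, and later failures fall through progressively more labels. A generic, self-contained sketch of the idiom (the foo_* steps are hypothetical stubs, not mlx4 code):

/* Hypothetical setup/teardown steps, stubbed so the sketch compiles. */
static int foo_alloc_ring(void)      { return 0; }
static int foo_activate_cq(void)     { return 0; }
static int foo_attach_rss(void)      { return 0; }
static void foo_deactivate_cq(void)  { }
static void foo_free_ring(void)      { }

static int foo_start(void)
{
        int err;

        err = foo_alloc_ring();         /* step 1 */
        if (err)
                goto out;

        err = foo_activate_cq();        /* step 2 */
        if (err)
                goto cq_err;            /* undo step 1 only */

        err = foo_attach_rss();         /* step 3 */
        if (err)
                goto rss_err;           /* undo steps 2 and 1 */

        return 0;

rss_err:
        foo_deactivate_cq();
cq_err:
        foo_free_ring();
out:
        return err;
}
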
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 0cbb78ca7b29..7942c4d3cd88 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -610,6 +610,10 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
         used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
                                               skb_shinfo(skb)->frags,
                                               page_alloc, length);
+        if (unlikely(!used_frags)) {
+                kfree_skb(skb);
+                return NULL;
+        }
         skb_shinfo(skb)->nr_frags = used_frags;
 
         /* Copy headers into the skb linear buffer */
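
The added check covers the case where mlx4_en_complete_rx_desc() cannot attach any fragments (for example, a replacement page allocation failed): the half-built skb is freed and NULL is returned so the caller drops the packet instead of dereferencing an empty frag list. The same defensive shape in a hypothetical receive helper, where foo_fill_frags() stands in for the mlx4 completion routine:

#include <linux/skbuff.h>

/* Stand-in for mlx4_en_complete_rx_desc(): tries to attach up to `budget`
 * page fragments to skb and returns how many it used (0 on failure). */
static unsigned int foo_fill_frags(struct sk_buff *skb, unsigned int budget);

static struct sk_buff *foo_build_rx_skb(unsigned int budget)
{
        struct sk_buff *skb = dev_alloc_skb(128);       /* small linear part */
        unsigned int used;

        if (unlikely(!skb))
                return NULL;

        used = foo_fill_frags(skb, budget);
        if (unlikely(!used)) {
                kfree_skb(skb);         /* nothing attached: drop cleanly */
                return NULL;
        }

        skb_shinfo(skb)->nr_frags = used;
        return skb;
}
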
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 015db1cece72..8e56fcf0a0e3 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -210,14 +210,11 @@ rx_drop:
 
 static struct net_device_stats *veth_get_stats(struct net_device *dev)
 {
-        struct veth_priv *priv;
-        struct net_device_stats *dev_stats;
-        int cpu;
+        struct veth_priv *priv = netdev_priv(dev);
+        struct net_device_stats *dev_stats = &dev->stats;
+        unsigned int cpu;
         struct veth_net_stats *stats;
 
-        priv = netdev_priv(dev);
-        dev_stats = &dev->stats;
-
         dev_stats->rx_packets = 0;
         dev_stats->tx_packets = 0;
         dev_stats->rx_bytes = 0;
@@ -225,16 +222,17 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev)
         dev_stats->tx_dropped = 0;
         dev_stats->rx_dropped = 0;
 
-        for_each_online_cpu(cpu) {
-                stats = per_cpu_ptr(priv->stats, cpu);
+        if (priv->stats)
+                for_each_online_cpu(cpu) {
+                        stats = per_cpu_ptr(priv->stats, cpu);
 
-                dev_stats->rx_packets += stats->rx_packets;
-                dev_stats->tx_packets += stats->tx_packets;
-                dev_stats->rx_bytes += stats->rx_bytes;
-                dev_stats->tx_bytes += stats->tx_bytes;
-                dev_stats->tx_dropped += stats->tx_dropped;
-                dev_stats->rx_dropped += stats->rx_dropped;
-        }
+                        dev_stats->rx_packets += stats->rx_packets;
+                        dev_stats->tx_packets += stats->tx_packets;
+                        dev_stats->rx_bytes += stats->rx_bytes;
+                        dev_stats->tx_bytes += stats->tx_bytes;
+                        dev_stats->tx_dropped += stats->tx_dropped;
+                        dev_stats->rx_dropped += stats->rx_dropped;
+                }
 
         return dev_stats;
 }
@@ -261,6 +259,8 @@ static int veth_close(struct net_device *dev)
         netif_carrier_off(dev);
         netif_carrier_off(priv->peer);
 
+        free_percpu(priv->stats);
+        priv->stats = NULL;
         return 0;
 }
 
@@ -291,15 +291,6 @@ static int veth_dev_init(struct net_device *dev)
         return 0;
 }
 
-static void veth_dev_free(struct net_device *dev)
-{
-        struct veth_priv *priv;
-
-        priv = netdev_priv(dev);
-        free_percpu(priv->stats);
-        free_netdev(dev);
-}
-
 static const struct net_device_ops veth_netdev_ops = {
         .ndo_init            = veth_dev_init,
         .ndo_open            = veth_open,
@@ -317,7 +308,7 @@ static void veth_setup(struct net_device *dev)
         dev->netdev_ops = &veth_netdev_ops;
         dev->ethtool_ops = &veth_ethtool_ops;
         dev->features |= NETIF_F_LLTX;
-        dev->destructor = veth_dev_free;
+        dev->destructor = free_netdev;
 }
 
 /*
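
Taken together, the veth changes tie the per-CPU stats buffer to the device lifecycle: it is allocated when the device is initialized (in veth_dev_init, whose body is not part of these hunks), summed under a NULL check in get_stats, and freed in .ndo_stop rather than in a private destructor, which lets dev->destructor be plain free_netdev. A condensed sketch of that pattern for a hypothetical foo driver, not veth's full code:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>

struct foo_stats {
        unsigned long rx_packets;
        unsigned long rx_bytes;
};

struct foo_priv {
        struct foo_stats __percpu *stats;
};

static int foo_init(struct net_device *dev)     /* .ndo_init */
{
        struct foo_priv *priv = netdev_priv(dev);

        priv->stats = alloc_percpu(struct foo_stats);
        return priv->stats ? 0 : -ENOMEM;
}

static int foo_stop(struct net_device *dev)     /* .ndo_stop */
{
        struct foo_priv *priv = netdev_priv(dev);

        free_percpu(priv->stats);
        priv->stats = NULL;     /* stats may still be queried afterwards */
        return 0;
}

static struct net_device_stats *foo_get_stats(struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);
        struct net_device_stats *out = &dev->stats;
        unsigned int cpu;

        out->rx_packets = 0;
        out->rx_bytes = 0;

        if (priv->stats)        /* NULL once foo_stop() has run */
                for_each_online_cpu(cpu) {
                        struct foo_stats *s = per_cpu_ptr(priv->stats, cpu);

                        out->rx_packets += s->rx_packets;
                        out->rx_bytes += s->rx_bytes;
                }

        return out;
}
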