author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/igb/igb_main.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c

Diffstat (limited to 'drivers/net/igb/igb_main.c'):

-rw-r--r--	drivers/net/igb/igb_main.c	531
1 file changed, 441 insertions(+), 90 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 9b4e5895f5f9..2c28621eb30b 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -45,17 +45,23 @@
 #include <linux/interrupt.h>
 #include <linux/if_ether.h>
 #include <linux/aer.h>
+#include <linux/prefetch.h>
 #ifdef CONFIG_IGB_DCA
 #include <linux/dca.h>
 #endif
 #include "igb.h"
 
-#define DRV_VERSION "2.1.0-k2"
+#define MAJ 3
+#define MIN 0
+#define BUILD 6
+#define KFIX 2
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+__stringify(BUILD) "-k" __stringify(KFIX)
 char igb_driver_name[] = "igb";
 char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
 	"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
 
 static const struct e1000_info *igb_info_tbl[] = {
 	[board_82575] = &e1000_82575_info,
@@ -68,9 +74,14 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
@@ -94,9 +105,9 @@ static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
 static void igb_setup_mrqc(struct igb_adapter *);
-void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
+static void igb_init_hw_timer(struct igb_adapter *adapter);
 static int igb_sw_init(struct igb_adapter *);
 static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
@@ -111,7 +122,8 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
-static struct net_device_stats *igb_get_stats(struct net_device *);
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
+						 struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
 static void igb_set_uta(struct igb_adapter *adapter);
@@ -145,6 +157,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
 				 struct ifla_vf_info *ivi);
+static void igb_check_vf_rate_limit(struct igb_adapter *);
 
 #ifdef CONFIG_PM
 static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -188,7 +201,7 @@ static struct pci_driver igb_driver = {
 	.probe = igb_probe,
 	.remove = __devexit_p(igb_remove),
 #ifdef CONFIG_PM
-	/* Power Managment Hooks */
+	/* Power Management Hooks */
 	.suspend = igb_suspend,
 	.resume = igb_resume,
 #endif
@@ -986,7 +999,7 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
  * Attempt to configure interrupts using the best available
  * capabilities of the hardware and kernel.
  **/
-static void igb_set_interrupt_capability(struct igb_adapter *adapter)
+static int igb_set_interrupt_capability(struct igb_adapter *adapter)
 {
 	int err;
 	int numvecs, i;
@@ -1052,8 +1065,10 @@ msi_only:
 	if (!pci_enable_msi(adapter->pdev))
 		adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
-	/* Notify the stack of the (possibly) reduced Tx Queue count. */
-	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
+	/* Notify the stack of the (possibly) reduced queue counts. */
+	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+	return netif_set_real_num_rx_queues(adapter->netdev,
+					    adapter->num_rx_queues);
 }
 
 /**
@@ -1152,7 +1167,9 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	int err;
 
-	igb_set_interrupt_capability(adapter);
+	err = igb_set_interrupt_capability(adapter);
+	if (err)
+		return err;
 
 	err = igb_alloc_q_vectors(adapter);
 	if (err) {
@@ -1530,7 +1547,9 @@ void igb_down(struct igb_adapter *adapter)
 	netif_carrier_off(netdev);
 
 	/* record the stats before reset*/
-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	spin_unlock(&adapter->stats64_lock);
 
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
@@ -1646,7 +1665,7 @@ void igb_reset(struct igb_adapter *adapter)
 	if (adapter->vfs_allocated_count) {
 		int i;
 		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
-			adapter->vf_data[i].flags = 0;
+			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
 
 		/* ping all the active vfs to let them know we are going down */
 		igb_ping_all_vfs(adapter);
@@ -1662,7 +1681,58 @@ void igb_reset(struct igb_adapter *adapter)
 
 	if (hw->mac.ops.init_hw(hw))
 		dev_err(&pdev->dev, "Hardware Error\n");
+	if (hw->mac.type > e1000_82580) {
+		if (adapter->flags & IGB_FLAG_DMAC) {
+			u32 reg;
+
+			/*
+			 * DMA Coalescing high water mark needs to be higher
+			 * than the Rx threshold. The Rx threshold is
+			 * currently pba - 6, so we should use a high water
+			 * mark of pba - 4.
+			 */
+			hwm = (pba - 4) << 10;
+
+			reg = (((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
+			       & E1000_DMACR_DMACTHR_MASK);
+
+			/* transition to L0x or L1 if available */
+			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+			/* watchdog timer = +/-1000 usec in 32 usec intervals */
+			reg |= (1000 >> 5);
+			wr32(E1000_DMACR, reg);
+
+			/* no lower threshold to disable coalescing
+			 * (smart fifo) - UTRESH=0 */
+			wr32(E1000_DMCRTRH, 0);
+
+			/* set hwm to PBA - 2 * max frame size */
+			wr32(E1000_FCRTC, hwm);
 
+			/*
+			 * This sets the time to wait before requesting
+			 * transition to low power state to the number of
+			 * usecs needed to receive one 512 byte frame at
+			 * gigabit line rate.
+			 */
+			reg = rd32(E1000_DMCTLX);
+			reg |= IGB_DMCTLX_DCFLUSH_DIS;
+
+			/* Delay 255 usec before entering Lx state. */
+			reg |= 0xFF;
+			wr32(E1000_DMCTLX, reg);
+
+			/* free space in Tx packet buffer to wake from DMAC */
+			wr32(E1000_DMCTXTH,
+			     (IGB_MIN_TXPBSIZE -
+			      (IGB_TX_BUF_4096 + adapter->max_frame_size))
+			     >> 6);
+
+			/* make low power state decision controlled by DMAC */
+			reg = rd32(E1000_PCIEMISC);
+			reg |= E1000_PCIEMISC_LX_DECISION;
+			wr32(E1000_PCIEMISC, reg);
+		} /* end if IGB_FLAG_DMAC set */
+	}
 	if (hw->mac.type == e1000_82580) {
 		u32 reg = rd32(E1000_PCIEMISC);
 		wr32(E1000_PCIEMISC,
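The DMAC block above works in 1 KB packet-buffer units: the Rx threshold is programmed at pba - 6, the high water mark two units above it at pba - 4 (shifted left by 10 to convert KB to bytes), and 1000 >> 5 expresses roughly 1000 usec in the watchdog field's 32-usec granularity. A standalone sketch of that arithmetic, with an illustrative pba value and a stand-in for the real E1000_DMACR_DMACTHR_SHIFT:

#include <stdint.h>
#include <stdio.h>

#define DMACTHR_SHIFT 16	/* stand-in; the real value is in the e1000 headers */

int main(void)
{
	uint32_t pba = 34;			/* packet buffer size in KB (example) */
	uint32_t hwm = (pba - 4) << 10;		/* high water mark, KB -> bytes */
	uint32_t thr = (pba - 6) << DMACTHR_SHIFT; /* Rx threshold field */
	uint32_t wdt = 1000 >> 5;		/* ~1000 usec in 32-usec units */

	printf("hwm=%u bytes thr=0x%08x watchdog=%u\n", hwm, thr, wdt);
	return 0;
}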
@@ -1683,7 +1753,7 @@ static const struct net_device_ops igb_netdev_ops = {
 	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
 	.ndo_start_xmit		= igb_xmit_frame_adv,
-	.ndo_get_stats		= igb_get_stats,
+	.ndo_get_stats64	= igb_get_stats64,
 	.ndo_set_rx_mode	= igb_set_rx_mode,
 	.ndo_set_multicast_list	= igb_set_rx_mode,
 	.ndo_set_mac_address	= igb_set_mac,
@@ -1721,12 +1791,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	struct igb_adapter *adapter;
 	struct e1000_hw *hw;
 	u16 eeprom_data = 0;
+	s32 ret_val;
 	static int global_quad_port_a; /* global quad port a indication */
 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
 	unsigned long mmio_start, mmio_len;
 	int err, pci_using_dac;
 	u16 eeprom_apme_mask = IGB_EEPROM_APME;
-	u32 part_num;
+	u8 part_str[E1000_PBANUM_LENGTH];
 
 	/* Catch broken hardware that put the wrong VF device ID in
 	 * the PCIe SR-IOV capability.
@@ -1856,8 +1927,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
 	netdev->vlan_features |= NETIF_F_SG;
 
-	if (pci_using_dac)
+	if (pci_using_dac) {
 		netdev->features |= NETIF_F_HIGHDMA;
+		netdev->vlan_features |= NETIF_F_HIGHDMA;
+	}
 
 	if (hw->mac.type >= e1000_82576)
 		netdev->features |= NETIF_F_SCTP_CSUM;
@@ -1869,7 +1942,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	hw->mac.ops.reset_hw(hw);
 
 	/* make sure the NVM is good */
-	if (igb_validate_nvm_checksum(hw) < 0) {
+	if (hw->nvm.ops.validate(hw) < 0) {
 		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
 		err = -EIO;
 		goto err_eeprom;
@@ -1888,9 +1961,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		goto err_eeprom;
 	}
 
-	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
+	setup_timer(&adapter->watchdog_timer, igb_watchdog,
		    (unsigned long) adapter);
-	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
+	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
		    (unsigned long) adapter);
 
 	INIT_WORK(&adapter->reset_task, igb_reset_task);
@@ -1977,6 +2050,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	}
 
 #endif
+	/* do hw tstamp init after resetting */
+	igb_init_hw_timer(adapter);
+
 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
 	/* print bus type/speed/width info */
 	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -1990,16 +2066,22 @@ static int __devinit igb_probe(struct pci_dev *pdev,
	         "unknown"),
	         netdev->dev_addr);
 
-	igb_read_part_num(hw, &part_num);
-	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
-		 (part_num >> 8), (part_num & 0xff));
-
+	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
+	if (ret_val)
+		strcpy(part_str, "Unknown");
+	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
 	dev_info(&pdev->dev,
		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		adapter->msix_entries ? "MSI-X" :
		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		adapter->num_rx_queues, adapter->num_tx_queues);
-
+	switch (hw->mac.type) {
+	case e1000_i350:
+		igb_set_eee_i350(hw);
+		break;
+	default:
+		break;
+	}
 	return 0;
 
 err_register:
@@ -2039,13 +2121,16 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	/* flush_scheduled work may reschedule our watchdog task, so
-	 * explicitly disable watchdog tasks from being rescheduled */
+	/*
+	 * The watchdog timer may be rescheduled, so explicitly
+	 * disable watchdog from being rescheduled.
+	 */
 	set_bit(__IGB_DOWN, &adapter->state);
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
 
 #ifdef CONFIG_IGB_DCA
 	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
@@ -2133,6 +2218,9 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
 		random_ether_addr(mac_addr);
 		igb_set_vf_mac(adapter, i, mac_addr);
 	}
+	/* DMA Coalescing is not supported in IOV mode. */
+	if (adapter->flags & IGB_FLAG_DMAC)
+		adapter->flags &= ~IGB_FLAG_DMAC;
 }
 #endif /* CONFIG_PCI_IOV */
 }
@@ -2205,7 +2293,7 @@ static void igb_init_hw_timer(struct igb_adapter *adapter)
 		/**
 		 * Scale the NIC clock cycle by a large factor so that
 		 * relatively small clock corrections can be added or
-		 * substracted at each clock tick. The drawbacks of a large
+		 * subtracted at each clock tick. The drawbacks of a large
 		 * factor are a) that the clock register overflows more quickly
 		 * (not such a big deal) and b) that the increment per tick has
 		 * to fit into 24 bits. As a result we need to use a shift of
@@ -2268,12 +2356,26 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
+	spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
-	if (hw->mac.type == e1000_82576)
-		adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
-
+	switch (hw->mac.type) {
+	case e1000_82576:
+	case e1000_i350:
+		if (max_vfs > 7) {
+			dev_warn(&pdev->dev,
+				 "Maximum of 7 VFs per PF, using max\n");
+			adapter->vfs_allocated_count = 7;
+		} else
+			adapter->vfs_allocated_count = max_vfs;
+		break;
+	default:
+		break;
+	}
 #endif /* CONFIG_PCI_IOV */
 	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+	/* i350 cannot do RSS and SR-IOV at the same time */
+	if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
+		adapter->rss_queues = 1;
 
 	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
@@ -2290,12 +2392,14 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 		return -ENOMEM;
 	}
 
-	igb_init_hw_timer(adapter);
 	igb_probe_vfs(adapter);
 
 	/* Explicitly disable IRQ since the NIC can be in any state. */
 	igb_irq_disable(adapter);
 
+	if (hw->mac.type == e1000_i350)
+		adapter->flags &= ~IGB_FLAG_DMAC;
+
 	set_bit(__IGB_DOWN, &adapter->state);
 	return 0;
 }
@@ -2425,10 +2529,9 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 	int size;
 
 	size = sizeof(struct igb_buffer) * tx_ring->count;
-	tx_ring->buffer_info = vmalloc(size);
+	tx_ring->buffer_info = vzalloc(size);
 	if (!tx_ring->buffer_info)
 		goto err;
-	memset(tx_ring->buffer_info, 0, size);
 
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -2576,10 +2679,9 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 	int size, desc_len;
 
 	size = sizeof(struct igb_buffer) * rx_ring->count;
-	rx_ring->buffer_info = vmalloc(size);
+	rx_ring->buffer_info = vzalloc(size);
 	if (!rx_ring->buffer_info)
 		goto err;
-	memset(rx_ring->buffer_info, 0, size);
 
 	desc_len = sizeof(union e1000_adv_rx_desc);
 
@@ -3311,7 +3413,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
 	} else {
 		/*
		 * Write addresses to the MTA, if the attempt fails
-		 * then we should just turn on promiscous mode so
+		 * then we should just turn on promiscuous mode so
		 * that we can at least receive multicast traffic
		 */
 		count = igb_write_mc_addr_list(netdev);
@@ -3325,7 +3427,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
 	/*
	 * Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
-	 * unicast promiscous mode
+	 * unicast promiscuous mode
	 */
 	count = igb_write_uc_addr_list(netdev);
 	if (count < 0) {
@@ -3351,6 +3453,45 @@ static void igb_set_rx_mode(struct net_device *netdev)
 	igb_restore_vf_multicasts(adapter);
 }
 
+static void igb_check_wvbr(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 wvbr = 0;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+	case e1000_i350:
+		if (!(wvbr = rd32(E1000_WVBR)))
+			return;
+		break;
+	default:
+		break;
+	}
+
+	adapter->wvbr |= wvbr;
+}
+
+#define IGB_STAGGERED_QUEUE_OFFSET 8
+
+static void igb_spoof_check(struct igb_adapter *adapter)
+{
+	int j;
+
+	if (!adapter->wvbr)
+		return;
+
+	for (j = 0; j < adapter->vfs_allocated_count; j++) {
+		if (adapter->wvbr & (1 << j) ||
+		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+			dev_warn(&adapter->pdev->dev,
+				 "Spoof event(s) detected on VF %d\n", j);
+			adapter->wvbr &=
+				~((1 << j) |
+				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+		}
+	}
+}
+
 /* Need to wait a few seconds after link up to get diagnostic information from
  * the phy */
 static void igb_update_phy_info(unsigned long data)
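In the spoof check above, the Wrong VM Behavior register reports each VF twice: bit j for VF j's primary queue and bit j + IGB_STAGGERED_QUEUE_OFFSET for its staggered second queue, so both bits are tested and cleared together. A minimal user-space model of that bit bookkeeping (the wvbr value here is fabricated for the example):

#include <stdint.h>
#include <stdio.h>

#define IGB_STAGGERED_QUEUE_OFFSET 8

int main(void)
{
	/* pretend VF 1 misbehaved on its staggered queue, VF 2 on its primary */
	uint32_t wvbr = (1u << (1 + IGB_STAGGERED_QUEUE_OFFSET)) | (1u << 2);
	int vfs_allocated_count = 4, j;

	for (j = 0; j < vfs_allocated_count; j++) {
		if (wvbr & (1u << j) ||
		    wvbr & (1u << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			printf("Spoof event(s) detected on VF %d\n", j);
			wvbr &= ~((1u << j) |
				  (1u << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
	return 0;
}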
@@ -3395,6 +3536,25 @@ bool igb_has_link(struct igb_adapter *adapter)
 	return link_active;
 }
 
+static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
+{
+	bool ret = false;
+	u32 ctrl_ext, thstat;
+
+	/* check for thermal sensor event on i350, copper only */
+	if (hw->mac.type == e1000_i350) {
+		thstat = rd32(E1000_THSTAT);
+		ctrl_ext = rd32(E1000_CTRL_EXT);
+
+		if ((hw->phy.media_type == e1000_media_type_copper) &&
+		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+			ret = !!(thstat & event);
+		}
+	}
+
+	return ret;
+}
+
 /**
  * igb_watchdog - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
@@ -3437,6 +3597,14 @@ static void igb_watchdog_task(struct work_struct *work)
			       ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
			       ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
 
+			/* check for thermal sensor event */
+			if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
+				printk(KERN_INFO "igb: %s The network adapter "
+						 "link speed was downshifted "
+						 "because it overheated.\n",
+						 netdev->name);
+			}
+
 			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
@@ -3451,6 +3619,7 @@ static void igb_watchdog_task(struct work_struct *work)
 			netif_carrier_on(netdev);
 
 			igb_ping_all_vfs(adapter);
+			igb_check_vf_rate_limit(adapter);
 
 			/* link state has changed, schedule phy info update */
 			if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -3461,6 +3630,15 @@ static void igb_watchdog_task(struct work_struct *work)
 		if (netif_carrier_ok(netdev)) {
 			adapter->link_speed = 0;
 			adapter->link_duplex = 0;
+
+			/* check for thermal sensor event */
+			if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
+				printk(KERN_ERR "igb: %s The network adapter "
+						"was stopped because it "
+						"overheated.\n",
+						netdev->name);
+			}
+
 			/* Links status message must follow this format */
 			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
@@ -3475,7 +3653,9 @@ static void igb_watchdog_task(struct work_struct *work)
 		}
 	}
 
-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	spin_unlock(&adapter->stats64_lock);
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -3508,6 +3688,8 @@ static void igb_watchdog_task(struct work_struct *work)
 		wr32(E1000_ICS, E1000_ICS_RXDMT0);
 	}
 
+	igb_spoof_check(adapter);
+
 	/* Reset the timer */
 	if (!test_bit(__IGB_DOWN, &adapter->state))
 		mod_timer(&adapter->watchdog_timer,
@@ -3527,7 +3709,7 @@ enum latency_range {
  * Stores a new ITR value based on strictly on packet size. This
  * algorithm is less sophisticated than that used in igb_update_itr,
  * due to the difficulty of synchronizing statistics across multiple
- * receive rings. The divisors and thresholds used by this fuction
+ * receive rings. The divisors and thresholds used by this function
  * were determined based on theoretical maximum wire speed and testing
  * data, in order to minimize response time while increasing bulk
  * throughput.
@@ -3542,6 +3724,8 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 	int new_val = q_vector->itr_val;
 	int avg_wire_size = 0;
 	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *ring;
+	unsigned int packets;
 
 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
@@ -3551,16 +3735,21 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 		goto set_itr_val;
 	}
 
-	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
-		struct igb_ring *ring = q_vector->rx_ring;
-		avg_wire_size = ring->total_bytes / ring->total_packets;
+	ring = q_vector->rx_ring;
+	if (ring) {
+		packets = ACCESS_ONCE(ring->total_packets);
+
+		if (packets)
+			avg_wire_size = ring->total_bytes / packets;
 	}
 
-	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
-		struct igb_ring *ring = q_vector->tx_ring;
-		avg_wire_size = max_t(u32, avg_wire_size,
-				      (ring->total_bytes /
-				       ring->total_packets));
+	ring = q_vector->tx_ring;
+	if (ring) {
+		packets = ACCESS_ONCE(ring->total_packets);
+
+		if (packets)
+			avg_wire_size = max_t(u32, avg_wire_size,
+					      ring->total_bytes / packets);
 	}
 
 	/* if avg_wire_size isn't set no work was done */
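ACCESS_ONCE() forces the compiler to read total_packets exactly once, so the zero test and the division operate on the same sample even while the ring counters keep moving underneath. The kernel macro is just a volatile cast; a self-contained illustration (GCC, for typeof):

#include <stdint.h>
#include <stdio.h>

/* same shape as the kernel macro: one forced (volatile) read of x */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

struct ring {
	uint64_t total_bytes;
	uint64_t total_packets;
};

int main(void)
{
	struct ring r = { .total_bytes = 123000, .total_packets = 100 };
	unsigned int packets = ACCESS_ONCE(r.total_packets);	/* one sample */
	unsigned int avg_wire_size = 0;

	if (packets)	/* the test and the divide share that one sample */
		avg_wire_size = r.total_bytes / packets;
	printf("avg wire size %u\n", avg_wire_size);
	return 0;
}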
@@ -3954,7 +4143,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 	}
 
 	tx_ring->buffer_info[i].skb = skb;
-	tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
+	tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
 	/* multiply data chunks by size of headers */
 	tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
 	tx_ring->buffer_info[i].gso_segs = gso_segs;
@@ -4069,7 +4258,11 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 
 	/* A reprieve! */
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-	tx_ring->tx_stats.restart_queue++;
+
+	u64_stats_update_begin(&tx_ring->tx_syncp2);
+	tx_ring->tx_stats.restart_queue2++;
+	u64_stats_update_end(&tx_ring->tx_syncp2);
+
 	return 0;
 }
 
@@ -4083,12 +4276,10 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
				    struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
 	int tso = 0, count;
 	u32 tx_flags = 0;
 	u16 first;
 	u8 hdr_len = 0;
-	union skb_shared_tx *shtx = skb_tx(skb);
 
 	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
@@ -4100,12 +4291,12 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
-	if (unlikely(shtx->hardware)) {
-		shtx->in_progress = 1;
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		tx_flags |= IGB_TX_FLAGS_TSTAMP;
 	}
 
-	if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
+	if (vlan_tx_tag_present(skb)) {
 		tx_flags |= IGB_TX_FLAGS_VLAN;
 		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 	}
@@ -4131,7 +4322,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 
 	/*
	 * count reflects descriptors mapped, if 0 or less then mapping error
-	 * has occured and we need to rewind the descriptor queue
+	 * has occurred and we need to rewind the descriptor queue
	 */
 	count = igb_tx_map_adv(tx_ring, skb, first);
 	if (!count) {
@@ -4207,16 +4398,22 @@ static void igb_reset_task(struct work_struct *work)
 }
 
 /**
- * igb_get_stats - Get System Network Statistics
+ * igb_get_stats64 - Get System Network Statistics
  * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
  *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
  **/
-static struct net_device_stats *igb_get_stats(struct net_device *netdev)
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
+						 struct rtnl_link_stats64 *stats)
 {
-	/* only return the current stats */
-	return &netdev->stats;
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	memcpy(stats, &adapter->stats64, sizeof(*stats));
+	spin_unlock(&adapter->stats64_lock);
+
+	return stats;
 }
 
 /**
@@ -4298,15 +4495,17 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
  * @adapter: board private structure
  **/
 
-void igb_update_stats(struct igb_adapter *adapter)
+void igb_update_stats(struct igb_adapter *adapter,
+		      struct rtnl_link_stats64 *net_stats)
 {
-	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	u32 reg, mpc;
 	u16 phy_tmp;
 	int i;
 	u64 bytes, packets;
+	unsigned int start;
+	u64 _bytes, _packets;
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
@@ -4324,10 +4523,17 @@ void igb_update_stats(struct igb_adapter *adapter,
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
 		struct igb_ring *ring = adapter->rx_ring[i];
+
 		ring->rx_stats.drops += rqdpc_tmp;
 		net_stats->rx_fifo_errors += rqdpc_tmp;
-		bytes += ring->rx_stats.bytes;
-		packets += ring->rx_stats.packets;
+
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+			_bytes = ring->rx_stats.bytes;
+			_packets = ring->rx_stats.packets;
+		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
 	}
 
 	net_stats->rx_bytes = bytes;
@@ -4337,8 +4543,13 @@ void igb_update_stats(struct igb_adapter *adapter,
 	packets = 0;
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = adapter->tx_ring[i];
-		bytes += ring->tx_stats.bytes;
-		packets += ring->tx_stats.packets;
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+			_bytes = ring->tx_stats.bytes;
+			_packets = ring->tx_stats.packets;
+		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
 	}
 	net_stats->tx_bytes = bytes;
 	net_stats->tx_packets = packets;
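The fetch_begin/fetch_retry loops let igb_update_stats() read the 64-bit per-ring counters without a lock: on 32-bit hosts, where a 64-bit load can tear, a per-ring sequence counter is sampled before and after the copy and the copy is retried if a writer ran in between (on 64-bit builds the helpers compile away). A simplified user-space model of that protocol, not the kernel u64_stats API itself:

#include <stdatomic.h>
#include <stdint.h>

struct ring_stats {
	atomic_uint seq;	/* odd while a writer is mid-update */
	uint64_t bytes;
	uint64_t packets;
};

/* writer side, cf. u64_stats_update_begin()/_end() */
static void stats_update(struct ring_stats *s, uint64_t b, uint64_t p)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
	s->bytes += b;
	s->packets += p;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

/* reader side, cf. the fetch_begin/fetch_retry loop above */
static void stats_read(struct ring_stats *s, uint64_t *b, uint64_t *p)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*b = s->bytes;
		*p = s->packets;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct ring_stats s = { 0 };
	uint64_t b, p;

	stats_update(&s, 1500, 1);
	stats_read(&s, &b, &p);
	return (int)p - 1;	/* 0 on success */
}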
@@ -4460,6 +4671,15 @@ void igb_update_stats(struct igb_adapter *adapter,
 	adapter->stats.mgptc += rd32(E1000_MGTPTC);
 	adapter->stats.mgprc += rd32(E1000_MGTPRC);
 	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
+
+	/* OS2BMC Stats */
+	reg = rd32(E1000_MANC);
+	if (reg & E1000_MANC_EN_BMC2OS) {
+		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
+		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
+		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
+		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
+	}
 }
 
 static irqreturn_t igb_msix_other(int irq, void *data)
@@ -4475,6 +4695,10 @@ static irqreturn_t igb_msix_other(int irq, void *data)
 	if (icr & E1000_ICR_DOUTSYNC) {
 		/* HW is reporting DMA is out of sync */
 		adapter->stats.doosync++;
+		/* The DMA Out of Sync is also indication of a spoof event
+		 * in IOV mode. Check the Wrong VM Behavior register to
+		 * see if it is really a spoof event. */
+		igb_check_wvbr(adapter);
 	}
 
 	/* Check for a mailbox event */
@@ -4660,12 +4884,13 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
 	u32 vmolr = rd32(E1000_VMOLR(vf));
 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
 
-	vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
+	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
 	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
 
 	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
 		vmolr |= E1000_VMOLR_MPME;
+		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
 		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
 	} else {
 		/*
@@ -4926,8 +5151,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
 
 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
 {
-	/* clear flags */
-	adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
+	/* clear flags - except flag that indicates PF has set the MAC */
+	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
 	adapter->vf_data[vf].last_nack = jiffies;
 
 	/* reset offloads to defaults */
@@ -4981,7 +5206,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 	reg = rd32(E1000_VFRE);
 	wr32(E1000_VFRE, reg | (1 << vf));
 
-	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
+	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
 
 	/* reply to reset with ack and vf mac address */
 	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
@@ -5060,7 +5285,14 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 
 	switch ((msgbuf[0] & 0xFFFF)) {
 	case E1000_VF_SET_MAC_ADDR:
-		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
+		retval = -EINVAL;
+		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
+			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
+		else
+			dev_warn(&pdev->dev,
+				 "VF %d attempted to override administratively "
+				 "set MAC address\nReload the VF driver to "
+				 "resume operations\n", vf);
 		break;
 	case E1000_VF_SET_PROMISC:
 		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
@@ -5072,8 +5304,12 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
 		break;
 	case E1000_VF_SET_VLAN:
-		if (adapter->vf_data[vf].pf_vlan)
-			retval = -1;
+		retval = -1;
+		if (vf_data->pf_vlan)
+			dev_warn(&pdev->dev,
+				 "VF %d attempted to override administratively "
+				 "set VLAN tag\nReload the VF driver to "
+				 "resume operations\n", vf);
 		else
 			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
 		break;
@@ -5121,8 +5357,8 @@ static void igb_msg_task(struct igb_adapter *adapter)
  *  The unicast table address is a register array of 32-bit registers.
  *  The table is meant to be used in a way similar to how the MTA is used
  *  however due to certain limitations in the hardware it is necessary to
- *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscous
- *  enable bit to allow vlan tag stripping when promiscous mode is enabled
+ *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
+ *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
  **/
 static void igb_set_uta(struct igb_adapter *adapter)
 {
@@ -5319,7 +5555,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *bu
 	u64 regval;
 
 	/* if skb does not support hw timestamp or TX stamp not valid exit */
-	if (likely(!buffer_info->shtx.hardware) ||
+	if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
 		return;
 
@@ -5389,7 +5625,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+			u64_stats_update_begin(&tx_ring->tx_syncp);
 			tx_ring->tx_stats.restart_queue++;
+			u64_stats_update_end(&tx_ring->tx_syncp);
 		}
 	}
 
@@ -5429,9 +5668,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	}
 	tx_ring->total_bytes += total_bytes;
 	tx_ring->total_packets += total_packets;
+	u64_stats_update_begin(&tx_ring->tx_syncp);
 	tx_ring->tx_stats.bytes += total_bytes;
 	tx_ring->tx_stats.packets += total_packets;
-	return (count < tx_ring->count);
+	u64_stats_update_end(&tx_ring->tx_syncp);
+	return count < tx_ring->count;
 }
 
 /**
@@ -5456,7 +5697,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
 static inline void igb_rx_checksum_adv(struct igb_ring *ring,
				       u32 status_err, struct sk_buff *skb)
 {
-	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
 
 	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
 	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
@@ -5472,9 +5713,11 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
		 * packets, (aka let the stack check the crc32c)
		 */
 		if ((skb->len == 60) &&
-		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
+		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
+			u64_stats_update_begin(&ring->rx_syncp);
 			ring->rx_stats.csum_err++;
-
+			u64_stats_update_end(&ring->rx_syncp);
+		}
 		/* let the stack verify checksum errors */
 		return;
 	}
@@ -5500,7 +5743,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
-	 * If nothing went wrong, then it should have a skb_shared_tx that we
+	 * If nothing went wrong, then it should have a shared tx_flags that we
	 * can turn into a skb_shared_hwtstamps.
	 */
 	if (staterr & E1000_RXDADV_STAT_TSIP) {
@@ -5661,8 +5904,10 @@ next_desc:
 
 	rx_ring->total_packets += total_packets;
 	rx_ring->total_bytes += total_bytes;
+	u64_stats_update_begin(&rx_ring->rx_syncp);
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
+	u64_stats_update_end(&rx_ring->rx_syncp);
 	return cleaned;
 }
 
@@ -5690,8 +5935,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
 			if (!buffer_info->page) {
 				buffer_info->page = netdev_alloc_page(netdev);
-				if (!buffer_info->page) {
+				if (unlikely(!buffer_info->page)) {
+					u64_stats_update_begin(&rx_ring->rx_syncp);
 					rx_ring->rx_stats.alloc_failed++;
+					u64_stats_update_end(&rx_ring->rx_syncp);
 					goto no_buffers;
 				}
 				buffer_info->page_offset = 0;
@@ -5706,7 +5953,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			if (dma_mapping_error(rx_ring->dev,
					      buffer_info->page_dma)) {
 				buffer_info->page_dma = 0;
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 		}
@@ -5714,8 +5963,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 		skb = buffer_info->skb;
 		if (!skb) {
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
-			if (!skb) {
+			if (unlikely(!skb)) {
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 
@@ -5729,7 +5980,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			if (dma_mapping_error(rx_ring->dev,
					      buffer_info->dma)) {
 				buffer_info->dma = 0;
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 		}
@@ -6092,7 +6345,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
 
 	if (adapter->vlgrp) {
 		u16 vid;
-		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
 			if (!vlan_group_get_device(adapter->vlgrp, vid))
 				continue;
 			igb_vlan_rx_add_vid(adapter->netdev, vid);
@@ -6100,14 +6353,25 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
 	}
 }
 
-int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
+int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_mac_info *mac = &adapter->hw.mac;
 
 	mac->autoneg = 0;
 
-	switch (spddplx) {
+	/* Make sure dplx is at most 1 bit and lsb of speed is not set
+	 * for the switch() below to work */
+	if ((spd & 1) || (dplx & ~1))
+		goto err_inval;
+
+	/* Fiber NICs only allow 1000 Mbps Full duplex */
+	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
+	    spd != SPEED_1000 &&
+	    dplx != DUPLEX_FULL)
+		goto err_inval;
+
+	switch (spd + dplx) {
 	case SPEED_10 + DUPLEX_HALF:
 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
 		break;
@@ -6126,10 +6390,13 @@
 		break;
 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
 	default:
-		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
-		return -EINVAL;
+		goto err_inval;
 	}
 	return 0;
+
+err_inval:
+	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
+	return -EINVAL;
 }
 
 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
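igb_set_spd_dplx() now keys its switch on spd + dplx, which is only unambiguous because every supported speed value is even and duplex occupies a single bit; that is exactly what the two guard checks enforce before the sum is formed. A quick standalone check of the encoding, using the ethtool values SPEED_10/100/1000 = 10/100/1000 and DUPLEX_HALF/FULL = 0/1:

#include <stdio.h>

#define SPEED_10	10
#define SPEED_100	100
#define SPEED_1000	1000
#define DUPLEX_HALF	0
#define DUPLEX_FULL	1

int main(void)
{
	/* every supported pair sums to a distinct case label */
	printf("%d %d %d %d %d\n",
	       SPEED_10 + DUPLEX_HALF,		/* 10 */
	       SPEED_10 + DUPLEX_FULL,		/* 11 */
	       SPEED_100 + DUPLEX_HALF,		/* 100 */
	       SPEED_100 + DUPLEX_FULL,		/* 101 */
	       SPEED_1000 + DUPLEX_FULL);	/* 1001 */
	return 0;
}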
@@ -6466,9 +6733,91 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 	return igb_set_vf_mac(adapter, vf, mac);
 }
 
+static int igb_link_mbps(int internal_link_speed)
+{
+	switch (internal_link_speed) {
+	case SPEED_100:
+		return 100;
+	case SPEED_1000:
+		return 1000;
+	default:
+		return 0;
+	}
+}
+
+static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
+				  int link_speed)
+{
+	int rf_dec, rf_int;
+	u32 bcnrc_val;
+
+	if (tx_rate != 0) {
+		/* Calculate the rate factor values to set */
+		rf_int = link_speed / tx_rate;
+		rf_dec = (link_speed - (rf_int * tx_rate));
+		rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
+		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
+			      E1000_RTTBCNRC_RF_INT_MASK);
+		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
+	} else {
+		bcnrc_val = 0;
+	}
+
+	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
+	wr32(E1000_RTTBCNRC, bcnrc_val);
+}
+
+static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
+{
+	int actual_link_speed, i;
+	bool reset_rate = false;
+
+	/* VF TX rate limit was not set or not supported */
+	if ((adapter->vf_rate_link_speed == 0) ||
+	    (adapter->hw.mac.type != e1000_82576))
+		return;
+
+	actual_link_speed = igb_link_mbps(adapter->link_speed);
+	if (actual_link_speed != adapter->vf_rate_link_speed) {
+		reset_rate = true;
+		adapter->vf_rate_link_speed = 0;
+		dev_info(&adapter->pdev->dev,
+			 "Link speed has been changed. VF Transmit "
+			 "rate is disabled\n");
+	}
+
+	for (i = 0; i < adapter->vfs_allocated_count; i++) {
+		if (reset_rate)
+			adapter->vf_data[i].tx_rate = 0;
+
+		igb_set_vf_rate_limit(&adapter->hw, i,
+				      adapter->vf_data[i].tx_rate,
+				      actual_link_speed);
+	}
+}
+
 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
 {
-	return -EOPNOTSUPP;
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int actual_link_speed;
+
+	if (hw->mac.type != e1000_82576)
+		return -EOPNOTSUPP;
+
+	actual_link_speed = igb_link_mbps(adapter->link_speed);
+	if ((vf >= adapter->vfs_allocated_count) ||
+	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
+	    (tx_rate < 0) || (tx_rate > actual_link_speed))
+		return -EINVAL;
+
+	adapter->vf_rate_link_speed = actual_link_speed;
+	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
+	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+	return 0;
 }
 
 static int igb_ndo_get_vf_config(struct net_device *netdev,
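igb_set_vf_rate_limit() programs the RTTBCNRC rate factor as the fixed-point ratio link_speed / tx_rate, with rf_int holding the integer part and rf_dec the fraction scaled by 1 << E1000_RTTBCNRC_RF_INT_SHIFT. Worked through for a hypothetical 300 Mbps cap on a 1000 Mbps link, assuming a 14-bit fraction field (the real shift and mask values live in the e1000 register headers):

#include <stdio.h>

#define RF_INT_SHIFT 14	/* assumed width of the fractional part */

int main(void)
{
	int link_speed = 1000;	/* Mbps */
	int tx_rate = 300;	/* requested VF cap, Mbps */

	/* the same fixed-point split as igb_set_vf_rate_limit() */
	int rf_int = link_speed / tx_rate;
	int rf_dec = ((link_speed - rf_int * tx_rate)
		      * (1 << RF_INT_SHIFT)) / tx_rate;

	/* prints "rate factor = 3 + 5461/16384" (~1000/300 = 3.333) */
	printf("rate factor = %d + %d/%d\n",
	       rf_int, rf_dec, 1 << RF_INT_SHIFT);
	return 0;
}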
@@ -6479,7 +6828,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
 		return -EINVAL;
 	ivi->vf = vf;
 	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
-	ivi->tx_rate = 0;
+	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
 	ivi->vlan = adapter->vf_data[vf].pf_vlan;
 	ivi->qos = adapter->vf_data[vf].pf_qos;
 	return 0;
@@ -6513,6 +6862,8 @@ static void igb_vmm_control(struct igb_adapter *adapter)
 	if (adapter->vfs_allocated_count) {
 		igb_vmdq_set_loopback_pf(hw, true);
 		igb_vmdq_set_replication_pf(hw, true);
+		igb_vmdq_set_anti_spoofing_pf(hw, true,
+					      adapter->vfs_allocated_count);
 	} else {
 		igb_vmdq_set_loopback_pf(hw, false);
 		igb_vmdq_set_replication_pf(hw, false);