Diffstat (limited to 'drivers/net/igb/igb_main.c')
 drivers/net/igb/igb_main.c | 451
 1 file changed, 189 insertions(+), 262 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index a50db5398fa..f8c2919bcec 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007 Intel Corporation.
+  Copyright(c) 2007-2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -48,12 +48,12 @@
 #endif
 #include "igb.h"
 
-#define DRV_VERSION "1.2.45-k2"
+#define DRV_VERSION "1.3.16-k2"
 char igb_driver_name[] = "igb";
 char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
 				"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
 
 static const struct e1000_info *igb_info_tbl[] = {
 	[board_82575] = &e1000_82575_info,
@@ -115,9 +115,6 @@ static bool igb_clean_tx_irq(struct igb_ring *);
 static int igb_poll(struct napi_struct *, int);
 static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
 static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
-#ifdef CONFIG_IGB_LRO
-static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
-#endif
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -407,7 +404,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 	/* Turn on MSI-X capability first, or our settings
 	 * won't stick.  And it will take days to debug. */
-	wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
-			 E1000_GPIE_PBA | E1000_GPIE_EIAME |
-			 E1000_GPIE_NSICR);
+	wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+	                 E1000_GPIE_PBA | E1000_GPIE_EIAME |
+	                 E1000_GPIE_NSICR);
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -546,6 +543,11 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 	int err;
 	int numvecs, i;
 
+	/* Number of supported queues. */
+	/* Having more queues than CPUs doesn't make sense. */
+	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
+
 	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
 	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
 					GFP_KERNEL);
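
The clamp now runs before the MSI-X vector count is computed, so numvecs reflects the final queue totals. As a minimal standalone sketch of the same pattern (the example_* names and limits below are stand-ins, not igb symbols):

	#include <linux/types.h>
	#include <linux/kernel.h>	/* min_t() */
	#include <linux/cpumask.h>	/* num_online_cpus() */

	#define EXAMPLE_MAX_RX_QUEUES 4	/* hypothetical hardware limit */
	#define EXAMPLE_MAX_TX_QUEUES 4

	struct example_adapter {
		u32 num_rx_queues;
		u32 num_tx_queues;
	};

	/* One queue per online CPU, capped by the hardware limit; queues
	 * beyond the CPU count would only bounce cache lines around. */
	static void example_set_queue_counts(struct example_adapter *a)
	{
		a->num_rx_queues = min_t(u32, EXAMPLE_MAX_RX_QUEUES, num_online_cpus());
		a->num_tx_queues = min_t(u32, EXAMPLE_MAX_TX_QUEUES, num_online_cpus());
	}
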
@@ -687,7 +689,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
 		wr32(E1000_EIAC, adapter->eims_enable_mask);
 		wr32(E1000_EIAM, adapter->eims_enable_mask);
 		wr32(E1000_EIMS, adapter->eims_enable_mask);
-		wr32(E1000_IMS, E1000_IMS_LSC);
+		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
 	} else {
 		wr32(E1000_IMS, IMS_ENABLE_MASK);
 		wr32(E1000_IAM, IMS_ENABLE_MASK);
@@ -856,6 +858,10 @@ void igb_down(struct igb_adapter *adapter)
 
 	netdev->tx_queue_len = adapter->tx_queue_len;
 	netif_carrier_off(netdev);
+
+	/* record the stats before reset*/
+	igb_update_stats(adapter);
+
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
 
@@ -886,11 +892,14 @@ void igb_reset(struct igb_adapter *adapter)
 	/* Repartition Pba for greater than 9k mtu
 	 * To take effect CTRL.RST is required.
 	 */
-	if (mac->type != e1000_82576) {
-	pba = E1000_PBA_34K;
-	}
-	else {
+	switch (mac->type) {
+	case e1000_82576:
 		pba = E1000_PBA_64K;
+		break;
+	case e1000_82575:
+	default:
+		pba = E1000_PBA_34K;
+		break;
 	}
 
 	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
@@ -972,21 +981,6 @@ void igb_reset(struct igb_adapter *adapter)
 	igb_get_phy_info(&adapter->hw);
 }
 
-/**
- * igb_is_need_ioport - determine if an adapter needs ioport resources or not
- * @pdev: PCI device information struct
- *
- * Returns true if an adapter needs ioport resources
- **/
-static int igb_is_need_ioport(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	/* Currently there are no adapters that need ioport resources */
-	default:
-		return false;
-	}
-}
-
 static const struct net_device_ops igb_netdev_ops = {
 	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
@@ -1026,21 +1020,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	struct pci_dev *us_dev;
 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
 	unsigned long mmio_start, mmio_len;
-	int i, err, pci_using_dac, pos;
+	int err, pci_using_dac, pos;
 	u16 eeprom_data = 0, state = 0;
 	u16 eeprom_apme_mask = IGB_EEPROM_APME;
 	u32 part_num;
-	int bars, need_ioport;
 
-	/* do not allocate ioport bars when not needed */
-	need_ioport = igb_is_need_ioport(pdev);
-	if (need_ioport) {
-		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
-		err = pci_enable_device(pdev);
-	} else {
-		bars = pci_select_bars(pdev, IORESOURCE_MEM);
-		err = pci_enable_device_mem(pdev);
-	}
+	err = pci_enable_device_mem(pdev);
 	if (err)
 		return err;
 
@@ -1083,7 +1068,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		break;
 	}
 
-	err = pci_request_selected_regions(pdev, bars, igb_driver_name);
+	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+	                                   IORESOURCE_MEM),
+	                                   igb_driver_name);
 	if (err)
 		goto err_pci_reg;
 
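
Both hunks above drop the need_ioport branching: the device is enabled with its memory BARs only, and exactly those BARs are then requested. A minimal sketch of that pairing for a hypothetical driver (example_probe and the region name are placeholders):

	#include <linux/pci.h>

	static int example_probe(struct pci_dev *pdev)
	{
		int err;

		/* Enable only the memory-mapped BARs; no legacy I/O ports. */
		err = pci_enable_device_mem(pdev);
		if (err)
			return err;

		/* Claim exactly the BARs pci_select_bars() picked out. */
		err = pci_request_selected_regions(pdev,
						   pci_select_bars(pdev, IORESOURCE_MEM),
						   "example_drv");
		if (err)
			pci_disable_device(pdev);
		return err;
	}
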
@@ -1111,15 +1098,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	hw = &adapter->hw;
 	hw->back = adapter;
 	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
-	adapter->bars = bars;
-	adapter->need_ioport = need_ioport;
 
 	mmio_start = pci_resource_start(pdev, 0);
 	mmio_len = pci_resource_len(pdev, 0);
 
 	err = -EIO;
-	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
-	if (!adapter->hw.hw_addr)
+	hw->hw_addr = ioremap(mmio_start, mmio_len);
+	if (!hw->hw_addr)
 		goto err_ioremap;
 
 	netdev->netdev_ops = &igb_netdev_ops;
@@ -1147,8 +1132,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	/* Initialize skew-specific constants */
 	err = ei->get_invariants(hw);
 	if (err)
-		goto err_hw_init;
+		goto err_sw_init;
 
+	/* setup the private structure */
 	err = igb_sw_init(adapter);
 	if (err)
 		goto err_sw_init;
@@ -1180,27 +1166,27 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1180 "PHY reset is blocked due to SOL/IDER session.\n"); 1166 "PHY reset is blocked due to SOL/IDER session.\n");
1181 1167
1182 netdev->features = NETIF_F_SG | 1168 netdev->features = NETIF_F_SG |
1183 NETIF_F_HW_CSUM | 1169 NETIF_F_IP_CSUM |
1184 NETIF_F_HW_VLAN_TX | 1170 NETIF_F_HW_VLAN_TX |
1185 NETIF_F_HW_VLAN_RX | 1171 NETIF_F_HW_VLAN_RX |
1186 NETIF_F_HW_VLAN_FILTER; 1172 NETIF_F_HW_VLAN_FILTER;
1187 1173
1174 netdev->features |= NETIF_F_IPV6_CSUM;
1188 netdev->features |= NETIF_F_TSO; 1175 netdev->features |= NETIF_F_TSO;
1189 netdev->features |= NETIF_F_TSO6; 1176 netdev->features |= NETIF_F_TSO6;
1190 1177
1191#ifdef CONFIG_IGB_LRO 1178#ifdef CONFIG_IGB_LRO
1192 netdev->features |= NETIF_F_LRO; 1179 netdev->features |= NETIF_F_GRO;
1193#endif 1180#endif
1194 1181
1195 netdev->vlan_features |= NETIF_F_TSO; 1182 netdev->vlan_features |= NETIF_F_TSO;
1196 netdev->vlan_features |= NETIF_F_TSO6; 1183 netdev->vlan_features |= NETIF_F_TSO6;
1197 netdev->vlan_features |= NETIF_F_HW_CSUM; 1184 netdev->vlan_features |= NETIF_F_IP_CSUM;
1198 netdev->vlan_features |= NETIF_F_SG; 1185 netdev->vlan_features |= NETIF_F_SG;
1199 1186
1200 if (pci_using_dac) 1187 if (pci_using_dac)
1201 netdev->features |= NETIF_F_HIGHDMA; 1188 netdev->features |= NETIF_F_HIGHDMA;
1202 1189
1203 netdev->features |= NETIF_F_LLTX;
1204 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); 1190 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
1205 1191
1206 /* before reading the NVM, reset the controller to put the device in a 1192 /* before reading the NVM, reset the controller to put the device in a
@@ -1238,14 +1224,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	INIT_WORK(&adapter->reset_task, igb_reset_task);
 	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
 
-	/* Initialize link & ring properties that are user-changeable */
-	adapter->tx_ring->count = 256;
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		adapter->tx_ring[i].count = adapter->tx_ring->count;
-	adapter->rx_ring->count = 256;
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		adapter->rx_ring[i].count = adapter->rx_ring->count;
-
+	/* Initialize link properties that are user-changeable */
 	adapter->fc_autoneg = true;
 	hw->mac.autoneg = true;
 	hw->phy.autoneg_advertised = 0x2f;
@@ -1266,8 +1245,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
 	if (hw->bus.func == 0 ||
 	    hw->device_id == E1000_DEV_ID_82575EB_COPPER)
-		hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1,
-				     &eeprom_data);
+		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
 
 	if (eeprom_data & eeprom_apme_mask)
 		adapter->eeprom_wol |= E1000_WUFC_MAG;
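
The NVM accessor is now reached through the generic ops name (read rather than read_nvm). A toy sketch of that ops-table indirection, with hypothetical example_* types standing in for the e1000 structures:

	#include <linux/types.h>

	struct example_hw;

	struct example_nvm_ops {
		s32 (*read)(struct example_hw *hw, u16 offset, u16 words, u16 *data);
	};

	struct example_hw {
		struct example_nvm_ops ops;
	};

	/* Callers name the operation, not the backing store; EEPROM and
	 * flash back ends can both sit behind the same pointer. */
	static s32 example_read_word(struct example_hw *hw, u16 offset, u16 *data)
	{
		return hw->ops.read(hw, offset, 1, data);
	}
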
@@ -1352,15 +1330,14 @@ err_eeprom:
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
 
-	igb_remove_device(hw);
 	igb_free_queues(adapter);
 err_sw_init:
-err_hw_init:
 	iounmap(hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
-	pci_release_selected_regions(pdev, bars);
+	pci_release_selected_regions(pdev, pci_select_bars(pdev,
+	                             IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
 	pci_disable_device(pdev);
@@ -1380,9 +1357,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_IGB_DCA
 	struct e1000_hw *hw = &adapter->hw;
-#endif
 	int err;
 
 	/* flush_scheduled work may reschedule our watchdog task, so
@@ -1411,15 +1386,15 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 	if (!igb_check_reset_block(&adapter->hw))
 		igb_reset_phy(&adapter->hw);
 
-	igb_remove_device(&adapter->hw);
 	igb_reset_interrupt_capability(adapter);
 
 	igb_free_queues(adapter);
 
-	iounmap(adapter->hw.hw_addr);
-	if (adapter->hw.flash_address)
-		iounmap(adapter->hw.flash_address);
-	pci_release_selected_regions(pdev, adapter->bars);
+	iounmap(hw->hw_addr);
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+	pci_release_selected_regions(pdev, pci_select_bars(pdev,
+	                             IORESOURCE_MEM));
 
 	free_netdev(netdev);
 
@@ -1454,11 +1429,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
-	/* Number of supported queues. */
-	/* Having more queues than CPUs doesn't make sense. */
-	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
-	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
-
 	/* This call may decrease the number of queues depending on
 	 * interrupt mode. */
 	igb_set_interrupt_capability(adapter);
@@ -1657,7 +1627,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
 		r_idx = i % adapter->num_tx_queues;
 		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
- 	}
+	}
 	return err;
 }
 
@@ -1738,14 +1708,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
 	struct pci_dev *pdev = adapter->pdev;
 	int size, desc_len;
 
-#ifdef CONFIG_IGB_LRO
-	size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
-	rx_ring->lro_mgr.lro_arr = vmalloc(size);
-	if (!rx_ring->lro_mgr.lro_arr)
-		goto err;
-	memset(rx_ring->lro_mgr.lro_arr, 0, size);
-#endif
-
 	size = sizeof(struct igb_buffer) * rx_ring->count;
 	rx_ring->buffer_info = vmalloc(size);
 	if (!rx_ring->buffer_info)
@@ -1772,10 +1734,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
 	return 0;
 
 err:
-#ifdef CONFIG_IGB_LRO
-	vfree(rx_ring->lro_mgr.lro_arr);
-	rx_ring->lro_mgr.lro_arr = NULL;
-#endif
 	vfree(rx_ring->buffer_info);
 	dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
 		"the receive descriptor ring\n");
@@ -1824,7 +1782,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
 
 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
-		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
 	/*
 	 * enable stripping of CRC. It's unlikely this will break BMC
@@ -1929,16 +1887,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 		rxdctl |= IGB_RX_HTHRESH << 8;
 		rxdctl |= IGB_RX_WTHRESH << 16;
 		wr32(E1000_RXDCTL(j), rxdctl);
-#ifdef CONFIG_IGB_LRO
-		/* Intitial LRO Settings */
-		ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
-		ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
-		ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
-		ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
-		ring->lro_mgr.dev = adapter->netdev;
-		ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
-		ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-#endif
 	}
 
 	if (adapter->num_rx_queues > 1) {
@@ -2127,11 +2075,6 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
 
-#ifdef CONFIG_IGB_LRO
-	vfree(rx_ring->lro_mgr.lro_arr);
-	rx_ring->lro_mgr.lro_arr = NULL;
-#endif
-
 	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
 
 	rx_ring->desc = NULL;
@@ -2231,15 +2174,16 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
 static int igb_set_mac(struct net_device *netdev, void *p)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 	struct sockaddr *addr = p;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-	adapter->hw.mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
 
 	return 0;
 }
@@ -2282,8 +2226,8 @@ static void igb_set_multi(struct net_device *netdev)
 
 	if (!netdev->mc_count) {
 		/* nothing to program, so clear mc list */
-		igb_update_mc_addr_list_82575(hw, NULL, 0, 1,
-					      mac->rar_entry_count);
+		igb_update_mc_addr_list(hw, NULL, 0, 1,
+					mac->rar_entry_count);
 		return;
 	}
 
@@ -2300,8 +2244,7 @@ static void igb_set_multi(struct net_device *netdev)
 		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
 		mc_ptr = mc_ptr->next;
 	}
-	igb_update_mc_addr_list_82575(hw, mta_list, i, 1,
-				      mac->rar_entry_count);
+	igb_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count);
 	kfree(mta_list);
 }
 
@@ -2314,6 +2257,46 @@ static void igb_update_phy_info(unsigned long data)
 }
 
 /**
+ * igb_has_link - check shared code for link and determine up/down
+ * @adapter: pointer to driver private info
+ **/
+static bool igb_has_link(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	bool link_active = false;
+	s32 ret_val = 0;
+
+	/* get_link_status is set on LSC (link status) interrupt or
+	 * rx sequence error interrupt.  get_link_status will stay
+	 * false until the e1000_check_for_link establishes link
+	 * for copper adapters ONLY
+	 */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		if (hw->mac.get_link_status) {
+			ret_val = hw->mac.ops.check_for_link(hw);
+			link_active = !hw->mac.get_link_status;
+		} else {
+			link_active = true;
+		}
+		break;
+	case e1000_media_type_fiber:
+		ret_val = hw->mac.ops.check_for_link(hw);
+		link_active = !!(rd32(E1000_STATUS) & E1000_STATUS_LU);
+		break;
+	case e1000_media_type_internal_serdes:
+		ret_val = hw->mac.ops.check_for_link(hw);
+		link_active = hw->mac.serdes_has_link;
+		break;
+	default:
+	case e1000_media_type_unknown:
+		break;
+	}
+
+	return link_active;
+}
+
+/**
  * igb_watchdog - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
  **/
@@ -2329,34 +2312,16 @@ static void igb_watchdog_task(struct work_struct *work)
 	struct igb_adapter *adapter = container_of(work,
 					struct igb_adapter, watchdog_task);
 	struct e1000_hw *hw = &adapter->hw;
-
 	struct net_device *netdev = adapter->netdev;
 	struct igb_ring *tx_ring = adapter->tx_ring;
-	struct e1000_mac_info *mac = &adapter->hw.mac;
 	u32 link;
 	u32 eics = 0;
-	s32 ret_val;
 	int i;
 
-	if ((netif_carrier_ok(netdev)) &&
-	    (rd32(E1000_STATUS) & E1000_STATUS_LU))
+	link = igb_has_link(adapter);
+	if ((netif_carrier_ok(netdev)) && link)
 		goto link_up;
 
-	ret_val = hw->mac.ops.check_for_link(&adapter->hw);
-	if ((ret_val == E1000_ERR_PHY) &&
-	    (hw->phy.type == e1000_phy_igp_3) &&
-	    (rd32(E1000_CTRL) &
-	     E1000_PHY_CTRL_GBE_DISABLE))
-		dev_info(&adapter->pdev->dev,
-			 "Gigabit has been disabled, downgrading speed\n");
-
-	if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
-	    !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
-		link = mac->serdes_has_link;
-	else
-		link = rd32(E1000_STATUS) &
-				E1000_STATUS_LU;
-
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
 			u32 ctrl;
@@ -2395,6 +2360,7 @@ static void igb_watchdog_task(struct work_struct *work)
 			netif_carrier_on(netdev);
 			netif_tx_wake_all_queues(netdev);
 
+			/* link state has changed, schedule phy info update */
 			if (!test_bit(__IGB_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
 					  round_jiffies(jiffies + 2 * HZ));
@@ -2408,6 +2374,8 @@ static void igb_watchdog_task(struct work_struct *work)
 			       netdev->name);
 			netif_carrier_off(netdev);
 			netif_tx_stop_all_queues(netdev);
+
+			/* link state has changed, schedule phy info update */
 			if (!test_bit(__IGB_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
 					  round_jiffies(jiffies + 2 * HZ));
@@ -2417,9 +2385,9 @@ static void igb_watchdog_task(struct work_struct *work)
 link_up:
 	igb_update_stats(adapter);
 
-	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+	hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
 	adapter->tpt_old = adapter->stats.tpt;
-	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
+	hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
 	adapter->colc_old = adapter->stats.colc;
 
 	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
@@ -2779,12 +2747,12 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			switch (skb->protocol) {
-			case __constant_htons(ETH_P_IP):
+			case cpu_to_be16(ETH_P_IP):
 				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
 				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
 				break;
-			case __constant_htons(ETH_P_IPV6):
+			case cpu_to_be16(ETH_P_IPV6):
 				/* XXX what about other V6 headers?? */
 				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
@@ -2803,6 +2771,8 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 		if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
 			context_desc->mss_l4len_idx =
 				cpu_to_le32(tx_ring->queue_index << 4);
+		else
+			context_desc->mss_l4len_idx = 0;
 
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
@@ -2981,12 +2951,9 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	unsigned int first;
 	unsigned int tx_flags = 0;
-	unsigned int len;
 	u8 hdr_len = 0;
 	int tso = 0;
 
-	len = skb_headlen(skb);
-
 	if (test_bit(__IGB_DOWN, &adapter->state)) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
@@ -3072,8 +3039,8 @@ static void igb_tx_timeout(struct net_device *netdev)
 	/* Do the reset outside of interrupt context */
 	adapter->tx_timeout_count++;
 	schedule_work(&adapter->reset_task);
-	wr32(E1000_EICS, adapter->eims_enable_mask &
-		~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER));
+	wr32(E1000_EICS,
+	     (adapter->eims_enable_mask & ~adapter->eims_other));
 }
 
 static void igb_reset_task(struct work_struct *work)
@@ -3317,15 +3284,20 @@ static irqreturn_t igb_msix_other(int irq, void *data)
 	u32 icr = rd32(E1000_ICR);
 
 	/* reading ICR causes bit 31 of EICR to be cleared */
+
+	if(icr & E1000_ICR_DOUTSYNC) {
+		/* HW is reporting DMA is out of sync */
+		adapter->stats.doosync++;
+	}
 	if (!(icr & E1000_ICR_LSC))
 		goto no_link_interrupt;
 	hw->mac.get_link_status = 1;
 	/* guard against interrupt when we're going down */
 	if (!test_bit(__IGB_DOWN, &adapter->state))
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 
 no_link_interrupt:
-	wr32(E1000_IMS, E1000_IMS_LSC);
+	wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
 	wr32(E1000_EIMS, adapter->eims_other);
 
 	return IRQ_HANDLED;
@@ -3385,8 +3357,8 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
 
 	igb_write_itr(rx_ring);
 
-	if (netif_rx_schedule_prep(&rx_ring->napi))
-		__netif_rx_schedule(&rx_ring->napi);
+	if (napi_schedule_prep(&rx_ring->napi))
+		__napi_schedule(&rx_ring->napi);
 
 #ifdef CONFIG_IGB_DCA
 	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -3529,19 +3501,24 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
 
 	igb_write_itr(adapter->rx_ring);
 
+	if(icr & E1000_ICR_DOUTSYNC) {
+		/* HW is reporting DMA is out of sync */
+		adapter->stats.doosync++;
+	}
+
 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 		hw->mac.get_link_status = 1;
 		if (!test_bit(__IGB_DOWN, &adapter->state))
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	netif_rx_schedule(&adapter->rx_ring[0].napi);
+	napi_schedule(&adapter->rx_ring[0].napi);
 
 	return IRQ_HANDLED;
 }
 
 /**
- * igb_intr - Interrupt Handler
+ * igb_intr - Legacy Interrupt Handler
  * @irq: interrupt number
  * @data: pointer to a network interface device structure
  **/
@@ -3553,7 +3530,6 @@ static irqreturn_t igb_intr(int irq, void *data)
 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
 	 * need for the IMC write */
 	u32 icr = rd32(E1000_ICR);
-	u32 eicr = 0;
 	if (!icr)
 		return IRQ_NONE;  /* Not our interrupt */
 
@@ -3564,7 +3540,10 @@ static irqreturn_t igb_intr(int irq, void *data)
 	if (!(icr & E1000_ICR_INT_ASSERTED))
 		return IRQ_NONE;
 
-	eicr = rd32(E1000_EICR);
+	if(icr & E1000_ICR_DOUTSYNC) {
+		/* HW is reporting DMA is out of sync */
+		adapter->stats.doosync++;
+	}
 
 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 		hw->mac.get_link_status = 1;
@@ -3573,7 +3552,7 @@ static irqreturn_t igb_intr(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	netif_rx_schedule(&adapter->rx_ring[0].napi);
+	napi_schedule(&adapter->rx_ring[0].napi);
 
 	return IRQ_HANDLED;
 }
@@ -3608,7 +3587,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
 	    !netif_running(netdev)) {
 		if (adapter->itr_setting & 3)
 			igb_set_itr(adapter);
-		netif_rx_complete(napi);
+		napi_complete(napi);
 		if (!test_bit(__IGB_DOWN, &adapter->state))
 			igb_irq_enable(adapter);
 		return 0;
@@ -3634,7 +3613,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
 
 	/* If not enough Rx work done, exit the polling mode */
 	if ((work_done == 0) || !netif_running(netdev)) {
-		netif_rx_complete(napi);
+		napi_complete(napi);
 
 		if (adapter->itr_setting & 3) {
 			if (adapter->num_rx_queues == 1)
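
This and the surrounding hunks track the 2.6.29 NAPI renames (netif_rx_schedule_prep, __netif_rx_schedule and netif_rx_complete become napi_schedule_prep, __napi_schedule and napi_complete). A minimal skeleton using the new names, independent of igb:

	#include <linux/netdevice.h>
	#include <linux/interrupt.h>

	/* interrupt handler: hand the ring to NAPI if not already scheduled */
	static irqreturn_t example_isr(int irq, void *data)
	{
		struct napi_struct *napi = data;

		if (napi_schedule_prep(napi))	/* was netif_rx_schedule_prep() */
			__napi_schedule(napi);	/* was __netif_rx_schedule() */
		return IRQ_HANDLED;
	}

	/* poll routine: report work done, leave polling mode when idle */
	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;

		/* ... clean up to budget descriptors, counting into work_done ... */
		if (work_done < budget)
			napi_complete(napi);	/* was netif_rx_complete() */
		return work_done;
	}
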
@@ -3764,42 +3743,9 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 	return (count < tx_ring->count);
 }
 
-#ifdef CONFIG_IGB_LRO
- /**
- * igb_get_skb_hdr - helper function for LRO header processing
- * @skb: pointer to sk_buff to be added to LRO packet
- * @iphdr: pointer to ip header structure
- * @tcph: pointer to tcp header structure
- * @hdr_flags: pointer to header flags
- * @priv: pointer to the receive descriptor for the current sk_buff
- **/
-static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
-                           u64 *hdr_flags, void *priv)
-{
-	union e1000_adv_rx_desc *rx_desc = priv;
-	u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
-	               (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
-
-	/* Verify that this is a valid IPv4 TCP packet */
-	if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
-	                  E1000_RXDADV_PKTTYPE_TCP))
-		return -1;
-
-	/* Set network headers */
-	skb_reset_network_header(skb);
-	skb_set_transport_header(skb, ip_hdrlen(skb));
-	*iphdr = ip_hdr(skb);
-	*tcph = tcp_hdr(skb);
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-
-	return 0;
-
-}
-#endif /* CONFIG_IGB_LRO */
-
 /**
  * igb_receive_skb - helper function to handle rx indications
  * @ring: pointer to receive ring receving this packet
  * @status: descriptor status field as written by hardware
  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
  * @skb: pointer to sk_buff to be indicated to stack
@@ -3811,28 +3757,21 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status,
 	struct igb_adapter * adapter = ring->adapter;
 	bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
 
-#ifdef CONFIG_IGB_LRO
-	if (adapter->netdev->features & NETIF_F_LRO &&
-	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
+	skb_record_rx_queue(skb, ring->queue_index);
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 		if (vlan_extracted)
-			lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
-			                             adapter->vlgrp,
-			                             le16_to_cpu(rx_desc->wb.upper.vlan),
-			                             rx_desc);
+			vlan_gro_receive(&ring->napi, adapter->vlgrp,
+					 le16_to_cpu(rx_desc->wb.upper.vlan),
+					 skb);
 		else
-			lro_receive_skb(&ring->lro_mgr,skb, rx_desc);
-		ring->lro_used = 1;
+			napi_gro_receive(&ring->napi, skb);
 	} else {
-#endif
 		if (vlan_extracted)
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 						 le16_to_cpu(rx_desc->wb.upper.vlan));
 		else
-
 			netif_receive_skb(skb);
-#ifdef CONFIG_IGB_LRO
 	}
-#endif
 }
 
 
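
With the LRO descriptor management gone, receive aggregation is handed to the stack's GRO engine, which only needs the NAPI context. A hedged sketch of the resulting receive-path pattern against the 2.6.29-era API (the parameters here stand in for the driver's ring state):

	#include <linux/netdevice.h>
	#include <linux/if_vlan.h>

	static void example_receive_skb(struct napi_struct *napi,
					struct vlan_group *vlgrp,
					struct sk_buff *skb, u16 vlan_tag,
					bool vlan_stripped)
	{
		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			/* GRO path: coalescing happens inside the stack now */
			if (vlan_stripped)
				vlan_gro_receive(napi, vlgrp, vlan_tag, skb);
			else
				napi_gro_receive(napi, skb);
		} else {
			/* packets without a verified checksum bypass GRO */
			if (vlan_stripped)
				vlan_hwaccel_receive_skb(skb, vlgrp, vlan_tag);
			else
				netif_receive_skb(skb);
		}
	}
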
@@ -3874,6 +3813,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 	unsigned int total_bytes = 0, total_packets = 0;
 
 	i = rx_ring->next_to_clean;
+	buffer_info = &rx_ring->buffer_info[i];
 	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 
@@ -3881,25 +3821,22 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 		if (*work_done >= budget)
 			break;
 		(*work_done)++;
-		buffer_info = &rx_ring->buffer_info[i];
 
-		/* HW will not DMA in data larger than the given buffer, even
-		 * if it parses the (NFS, of course) header to be larger.  In
-		 * that case, it fills the header buffer and spills the rest
-		 * into the page.
-		 */
-		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
-		       E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-		if (hlen > adapter->rx_ps_hdr_size)
-			hlen = adapter->rx_ps_hdr_size;
+		skb = buffer_info->skb;
+		prefetch(skb->data - NET_IP_ALIGN);
+		buffer_info->skb = NULL;
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
+		prefetch(next_rxd);
+		next_buffer = &rx_ring->buffer_info[i];
 
 		length = le16_to_cpu(rx_desc->wb.upper.length);
 		cleaned = true;
 		cleaned_count++;
 
-		skb = buffer_info->skb;
-		prefetch(skb->data - NET_IP_ALIGN);
-		buffer_info->skb = NULL;
 		if (!adapter->rx_ps_hdr_size) {
 			pci_unmap_single(pdev, buffer_info->dma,
 					 adapter->rx_buffer_len +
@@ -3909,6 +3846,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 			goto send_up;
 		}
 
+		/* HW will not DMA in data larger than the given buffer, even
+		 * if it parses the (NFS, of course) header to be larger.  In
+		 * that case, it fills the header buffer and spills the rest
+		 * into the page.
+		 */
+		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
+		       E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
+		if (hlen > adapter->rx_ps_hdr_size)
+			hlen = adapter->rx_ps_hdr_size;
+
 		if (!skb_shinfo(skb)->nr_frags) {
 			pci_unmap_single(pdev, buffer_info->dma,
 					 adapter->rx_ps_hdr_size +
@@ -3938,13 +3885,6 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 
 			skb->truesize += length;
 		}
-send_up:
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
-		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
-		prefetch(next_rxd);
-		next_buffer = &rx_ring->buffer_info[i];
 
 		if (!(staterr & E1000_RXD_STAT_EOP)) {
 			buffer_info->skb = next_buffer->skb;
@@ -3953,7 +3893,7 @@ send_up:
 			next_buffer->dma = 0;
 			goto next_desc;
 		}
-
+send_up:
 		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
@@ -3980,20 +3920,12 @@ next_desc:
 		/* use prefetched values */
 		rx_desc = next_rxd;
 		buffer_info = next_buffer;
-
 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 
 	rx_ring->next_to_clean = i;
 	cleaned_count = IGB_DESC_UNUSED(rx_ring);
 
-#ifdef CONFIG_IGB_LRO
-	if (rx_ring->lro_used) {
-		lro_flush_all(&rx_ring->lro_mgr);
-		rx_ring->lro_used = 0;
-	}
-#endif
-
 	if (cleaned_count)
 		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
 
@@ -4021,10 +3953,17 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
+	int bufsz;
 
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
 
+	if (adapter->rx_ps_hdr_size)
+		bufsz = adapter->rx_ps_hdr_size;
+	else
+		bufsz = adapter->rx_buffer_len;
+	bufsz += NET_IP_ALIGN;
+
 	while (cleaned_count--) {
 		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 
@@ -4040,23 +3979,14 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 				buffer_info->page_offset ^= PAGE_SIZE / 2;
 			}
 			buffer_info->page_dma =
-				pci_map_page(pdev,
-					     buffer_info->page,
+				pci_map_page(pdev, buffer_info->page,
 					     buffer_info->page_offset,
 					     PAGE_SIZE / 2,
 					     PCI_DMA_FROMDEVICE);
 		}
 
 		if (!buffer_info->skb) {
-			int bufsz;
-
-			if (adapter->rx_ps_hdr_size)
-				bufsz = adapter->rx_ps_hdr_size;
-			else
-				bufsz = adapter->rx_buffer_len;
-			bufsz += NET_IP_ALIGN;
 			skb = netdev_alloc_skb(netdev, bufsz);
-
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
@@ -4072,7 +4002,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 			buffer_info->dma = pci_map_single(pdev, skb->data,
 							  bufsz,
 							  PCI_DMA_FROMDEVICE);
-
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
@@ -4206,7 +4135,7 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 vfta, index;
 
-	if ((adapter->hw.mng_cookie.status &
+	if ((hw->mng_cookie.status &
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 	    (vid == adapter->mng_vlan_id))
 		return;
@@ -4390,10 +4319,7 @@ static int igb_resume(struct pci_dev *pdev)
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
-	if (adapter->need_ioport)
-		err = pci_enable_device(pdev);
-	else
-		err = pci_enable_device_mem(pdev);
+	err = pci_enable_device_mem(pdev);
 	if (err) {
 		dev_err(&pdev->dev,
 			"igb: Cannot enable PCI device from suspend\n");
@@ -4414,6 +4340,11 @@ static int igb_resume(struct pci_dev *pdev)
 	/* e1000_power_up_phy(adapter); */
 
 	igb_reset(adapter);
+
+	/* let the f/w know that the h/w is now under the control of the
+	 * driver. */
+	igb_get_hw_control(adapter);
+
 	wr32(E1000_WUS, ~0);
 
 	if (netif_running(netdev)) {
@@ -4424,10 +4355,6 @@ static int igb_resume(struct pci_dev *pdev)
 
 	netif_device_attach(netdev);
 
-	/* let the f/w know that the h/w is now under the control of the
-	 * driver. */
-	igb_get_hw_control(adapter);
-
 	return 0;
 }
 #endif
@@ -4446,22 +4373,27 @@ static void igb_shutdown(struct pci_dev *pdev)
 static void igb_netpoll(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 	int i;
-	int work_done = 0;
 
-	igb_irq_disable(adapter);
-	adapter->flags |= IGB_FLAG_IN_NETPOLL;
-
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		igb_clean_tx_irq(&adapter->tx_ring[i]);
+	if (!adapter->msix_entries) {
+		igb_irq_disable(adapter);
+		napi_schedule(&adapter->rx_ring[0].napi);
+		return;
+	}
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		igb_clean_rx_irq_adv(&adapter->rx_ring[i],
-				     &work_done,
-				     adapter->rx_ring[i].napi.weight);
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igb_ring *tx_ring = &adapter->tx_ring[i];
+		wr32(E1000_EIMC, tx_ring->eims_value);
+		igb_clean_tx_irq(tx_ring);
+		wr32(E1000_EIMS, tx_ring->eims_value);
+	}
 
-	adapter->flags &= ~IGB_FLAG_IN_NETPOLL;
-	igb_irq_enable(adapter);
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *rx_ring = &adapter->rx_ring[i];
+		wr32(E1000_EIMC, rx_ring->eims_value);
+		napi_schedule(&rx_ring->napi);
+	}
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
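
The rewritten netpoll path masks and services one MSI-X vector at a time instead of disabling the whole device: writing a vector's bit to EIMC masks it, writing the same bit to EIMS unmasks it again. A sketch of that mask-poll-unmask step with placeholder names (writel() on EIMC/EIMS-style registers stands in for the driver's wr32() accessor, and example_clean_ring is hypothetical):

	#include <linux/io.h>
	#include <linux/types.h>

	struct example_ring;				/* hypothetical ring type */
	void example_clean_ring(struct example_ring *ring);	/* defined elsewhere */

	/* Mask one MSI-X vector, service its ring with that interrupt
	 * blocked, then unmask it; other vectors keep firing normally. */
	static void example_netpoll_one_ring(void __iomem *eimc_reg,
					     void __iomem *eims_reg,
					     u32 vector_bit,
					     struct example_ring *ring)
	{
		writel(vector_bit, eimc_reg);	/* EIMC write masks the bit */
		example_clean_ring(ring);
		writel(vector_bit, eims_reg);	/* EIMS write unmasks it */
	}
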
@@ -4504,12 +4436,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
 	pci_ers_result_t result;
 	int err;
 
-	if (adapter->need_ioport)
-		err = pci_enable_device(pdev);
-	else
-		err = pci_enable_device_mem(pdev);
-
-	if (err) {
+	if (pci_enable_device_mem(pdev)) {
 		dev_err(&pdev->dev,
 			"Cannot re-enable PCI device after reset.\n");
 		result = PCI_ERS_RESULT_DISCONNECT;