Diffstat (limited to 'drivers/net/igb/igb_main.c')
 drivers/net/igb/igb_main.c | 403 ++++++++++++++++++++++++++-----------------
 1 file changed, 222 insertions(+), 181 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 20d27e622ec1..022794e579c7 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -42,6 +42,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/if_ether.h>
+#include <linux/aer.h>
 #ifdef CONFIG_IGB_DCA
 #include <linux/dca.h>
 #endif
@@ -76,8 +77,6 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
-static void igb_free_tx_resources(struct igb_ring *);
-static void igb_free_rx_resources(struct igb_ring *);
 void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
@@ -232,6 +231,40 @@ static void __exit igb_exit_module(void)
 
 module_exit(igb_exit_module);
 
+#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
+/**
+ * igb_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ **/
+static void igb_cache_ring_register(struct igb_adapter *adapter)
+{
+	int i;
+
+	switch (adapter->hw.mac.type) {
+	case e1000_82576:
+		/* The queues are allocated for virtualization such that VF 0
+		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
+		 * In order to avoid collision we start at the first free queue
+		 * and continue consuming queues in the same sequence
+		 */
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].reg_idx = Q_IDX_82576(i);
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].reg_idx = Q_IDX_82576(i);
+		break;
+	case e1000_82575:
+	default:
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].reg_idx = i;
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].reg_idx = i;
+		break;
+	}
+}
+
 /**
  * igb_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
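
Read as a table, the Q_IDX_82576() mapping added above interleaves driver queues with the slots reserved for virtual functions: even indices land in registers 0-3, odd indices in 8-11. A minimal standalone sketch; the macro is taken from the patch, the printf harness around it is illustrative only:

#include <stdio.h>

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))

int main(void)
{
	int i;

	/* queue 0 -> 0, 1 -> 8, 2 -> 1, 3 -> 9, ... so each VF's paired
	 * registers (n, n + 8) stay free of collisions */
	for (i = 0; i < 8; i++)
		printf("queue %d -> reg_idx %d\n", i, Q_IDX_82576(i));
	return 0;
}
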
@@ -259,11 +292,13 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = &(adapter->tx_ring[i]);
+		ring->count = adapter->tx_ring_count;
 		ring->adapter = adapter;
 		ring->queue_index = i;
 	}
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
+		ring->count = adapter->rx_ring_count;
 		ring->adapter = adapter;
 		ring->queue_index = i;
 		ring->itr_register = E1000_ITR;
@@ -271,6 +306,8 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		/* set a default napi handler for each rx_ring */
 		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
 	}
+
+	igb_cache_ring_register(adapter);
 	return 0;
 }
 
@@ -311,36 +348,36 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
 		break;
 	case e1000_82576:
-		/* The 82576 uses a table-based method for assigning vectors.
+		/* 82576 uses a table-based method for assigning vectors.
 		   Each queue has a single entry in the table to which we write
 		   a vector number along with a "valid" bit.  Sadly, the layout
 		   of the table is somewhat counterintuitive. */
 		if (rx_queue > IGB_N0_QUEUE) {
-			index = (rx_queue & 0x7);
+			index = (rx_queue >> 1);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (rx_queue < 8) {
-				/* vector goes into low byte of register */
-				ivar = ivar & 0xFFFFFF00;
-				ivar |= msix_vector | E1000_IVAR_VALID;
-			} else {
+			if (rx_queue & 0x1) {
 				/* vector goes into third byte of register */
 				ivar = ivar & 0xFF00FFFF;
 				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
+			} else {
+				/* vector goes into low byte of register */
+				ivar = ivar & 0xFFFFFF00;
+				ivar |= msix_vector | E1000_IVAR_VALID;
 			}
 			adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
 		if (tx_queue > IGB_N0_QUEUE) {
-			index = (tx_queue & 0x7);
+			index = (tx_queue >> 1);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (tx_queue < 8) {
-				/* vector goes into second byte of register */
-				ivar = ivar & 0xFFFF00FF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
-			} else {
+			if (tx_queue & 0x1) {
 				/* vector goes into high byte of register */
 				ivar = ivar & 0x00FFFFFF;
 				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
+			} else {
+				/* vector goes into second byte of register */
+				ivar = ivar & 0xFFFF00FF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
 			}
 			adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
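
The rework above pairs queues two per IVAR register (index = queue >> 1) and selects a byte lane by queue parity. A sketch of the rx side, assuming the 0x80 valid bit matches E1000_IVAR_VALID; the helper and its harness are illustrative, not driver code:

#include <stdio.h>
#include <stdint.h>

#define IVAR_VALID 0x80	/* assumed value of E1000_IVAR_VALID */

static uint32_t ivar_set_rx(uint32_t ivar, int rx_queue, uint8_t vector)
{
	if (rx_queue & 0x1) {
		/* odd rx queues: vector goes into the third byte */
		ivar &= 0xFF00FFFF;
		ivar |= (uint32_t)(vector | IVAR_VALID) << 16;
	} else {
		/* even rx queues: vector goes into the low byte */
		ivar &= 0xFFFFFF00;
		ivar |= vector | IVAR_VALID;
	}
	return ivar;
}

int main(void)
{
	uint32_t ivar = 0;

	/* rx queues 2 and 3 share IVAR register 2 >> 1 == 1 */
	ivar = ivar_set_rx(ivar, 2, 4);
	ivar = ivar_set_rx(ivar, 3, 5);
	printf("IVAR[1] = 0x%08x\n", (unsigned)ivar);	/* 0x00850084 */
	return 0;
}
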
@@ -445,7 +482,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = &(adapter->tx_ring[i]);
-		sprintf(ring->name, "%s-tx%d", netdev->name, i);
+		sprintf(ring->name, "%s-tx-%d", netdev->name, i);
 		err = request_irq(adapter->msix_entries[vector].vector,
 				  &igb_msix_tx, 0, ring->name,
 				  &(adapter->tx_ring[i]));
@@ -458,7 +495,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
 		if (strlen(netdev->name) < (IFNAMSIZ - 5))
-			sprintf(ring->name, "%s-rx%d", netdev->name, i);
+			sprintf(ring->name, "%s-rx-%d", netdev->name, i);
 		else
 			memcpy(ring->name, netdev->name, IFNAMSIZ);
 		err = request_irq(adapter->msix_entries[vector].vector,
@@ -931,8 +968,7 @@ void igb_reset(struct igb_adapter *adapter)
 	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
 
 	igb_reset_adaptive(&adapter->hw);
-	if (adapter->hw.phy.ops.get_phy_info)
-		adapter->hw.phy.ops.get_phy_info(&adapter->hw);
+	igb_get_phy_info(&adapter->hw);
 }
 
 /**
@@ -950,6 +986,25 @@ static int igb_is_need_ioport(struct pci_dev *pdev)
 	}
 }
 
+static const struct net_device_ops igb_netdev_ops = {
+	.ndo_open		= igb_open,
+	.ndo_stop		= igb_close,
+	.ndo_start_xmit		= igb_xmit_frame_adv,
+	.ndo_get_stats		= igb_get_stats,
+	.ndo_set_multicast_list	= igb_set_multi,
+	.ndo_set_mac_address	= igb_set_mac,
+	.ndo_change_mtu		= igb_change_mtu,
+	.ndo_do_ioctl		= igb_ioctl,
+	.ndo_tx_timeout		= igb_tx_timeout,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_vlan_rx_register	= igb_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= igb_netpoll,
+#endif
+};
+
 /**
  * igb_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -1031,6 +1086,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_pci_reg;
 
+	err = pci_enable_pcie_error_reporting(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
+		        "0x%x\n", err);
+		/* non-fatal, continue */
+	}
+
 	pci_set_master(pdev);
 	pci_save_state(pdev);
 
@@ -1059,23 +1121,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	if (!adapter->hw.hw_addr)
 		goto err_ioremap;
 
-	netdev->open = &igb_open;
-	netdev->stop = &igb_close;
-	netdev->get_stats = &igb_get_stats;
-	netdev->set_multicast_list = &igb_set_multi;
-	netdev->set_mac_address = &igb_set_mac;
-	netdev->change_mtu = &igb_change_mtu;
-	netdev->do_ioctl = &igb_ioctl;
+	netdev->netdev_ops = &igb_netdev_ops;
 	igb_set_ethtool_ops(netdev);
-	netdev->tx_timeout = &igb_tx_timeout;
 	netdev->watchdog_timeo = 5 * HZ;
-	netdev->vlan_rx_register = igb_vlan_rx_register;
-	netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
-	netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	netdev->poll_controller = igb_netpoll;
-#endif
-	netdev->hard_start_xmit = &igb_xmit_frame_adv;
 
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
@@ -1275,16 +1323,14 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
 	/* print bus type/speed/width info */
-	dev_info(&pdev->dev,
-		 "%s: (PCIe:%s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
+	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
 		 netdev->name,
 		 ((hw->bus.speed == e1000_bus_speed_2500)
 		  ? "2.5Gb/s" : "unknown"),
 		 ((hw->bus.width == e1000_bus_width_pcie_x4)
 		  ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
 		  ? "Width x1" : "unknown"),
-		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+		 netdev->dev_addr);
 
 	igb_read_part_num(hw, &part_num);
 	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
@@ -1302,7 +1348,7 @@ err_register:
 	igb_release_hw_control(adapter);
 err_eeprom:
 	if (!igb_check_reset_block(hw))
-		hw->phy.ops.reset_phy(hw);
+		igb_reset_phy(hw);
 
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
@@ -1338,6 +1384,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 #ifdef CONFIG_IGB_DCA
 	struct e1000_hw *hw = &adapter->hw;
 #endif
+	int err;
 
 	/* flush_scheduled work may reschedule our watchdog task, so
 	 * explicitly disable watchdog tasks from being rescheduled */
@@ -1362,9 +1409,8 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
 	unregister_netdev(netdev);
 
-	if (adapter->hw.phy.ops.reset_phy &&
-	    !igb_check_reset_block(&adapter->hw))
-		adapter->hw.phy.ops.reset_phy(&adapter->hw);
+	if (!igb_check_reset_block(&adapter->hw))
+		igb_reset_phy(&adapter->hw);
 
 	igb_remove_device(&adapter->hw);
 	igb_reset_interrupt_capability(adapter);
@@ -1378,6 +1424,11 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
 	free_netdev(netdev);
 
+	err = pci_disable_pcie_error_reporting(pdev);
+	if (err)
+		dev_err(&pdev->dev,
+		        "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+
 	pci_disable_device(pdev);
 }
 
@@ -1397,6 +1448,8 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
 	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
 
+	adapter->tx_ring_count = IGB_DEFAULT_TXD;
+	adapter->rx_ring_count = IGB_DEFAULT_RXD;
 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 	adapter->rx_ps_hdr_size = 0; /* disable packet split */
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
@@ -1558,8 +1611,7 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
 	memset(tx_ring->buffer_info, 0, size);
 
 	/* round up to nearest 4K */
-	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc)
-			+ sizeof(u32);
+	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
 	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
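
With the head write-back slot gone, the allocation above is just the descriptor array rounded up to a 4 KB boundary. A sketch of the arithmetic, assuming the kernel's ALIGN() semantics, a 16-byte e1000_tx_desc, and a 256-entry IGB_DEFAULT_TXD:

#include <stdio.h>
#include <stddef.h>

/* simplified form of the kernel's ALIGN(); a must be a power of two */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t count = 256;		/* assumed IGB_DEFAULT_TXD */
	size_t size = count * 16;	/* sizeof(struct e1000_tx_desc) */

	size = ALIGN(size, 4096);
	printf("%zu descriptors -> %zu bytes\n", count, size);
	return 0;
}
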
@@ -1618,43 +1670,37 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
  **/
 static void igb_configure_tx(struct igb_adapter *adapter)
 {
-	u64 tdba, tdwba;
+	u64 tdba;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 tctl;
 	u32 txdctl, txctrl;
-	int i;
+	int i, j;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = &(adapter->tx_ring[i]);
-
-		wr32(E1000_TDLEN(i),
+		j = ring->reg_idx;
+		wr32(E1000_TDLEN(j),
 		     ring->count * sizeof(struct e1000_tx_desc));
 		tdba = ring->dma;
-		wr32(E1000_TDBAL(i),
+		wr32(E1000_TDBAL(j),
 		     tdba & 0x00000000ffffffffULL);
-		wr32(E1000_TDBAH(i), tdba >> 32);
-
-		tdwba = ring->dma + ring->count * sizeof(struct e1000_tx_desc);
-		tdwba |= 1; /* enable head wb */
-		wr32(E1000_TDWBAL(i),
-		     tdwba & 0x00000000ffffffffULL);
-		wr32(E1000_TDWBAH(i), tdwba >> 32);
+		wr32(E1000_TDBAH(j), tdba >> 32);
 
-		ring->head = E1000_TDH(i);
-		ring->tail = E1000_TDT(i);
+		ring->head = E1000_TDH(j);
+		ring->tail = E1000_TDT(j);
 		writel(0, hw->hw_addr + ring->tail);
 		writel(0, hw->hw_addr + ring->head);
-		txdctl = rd32(E1000_TXDCTL(i));
+		txdctl = rd32(E1000_TXDCTL(j));
 		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-		wr32(E1000_TXDCTL(i), txdctl);
+		wr32(E1000_TXDCTL(j), txdctl);
 
 		/* Turn off Relaxed Ordering on head write-backs.  The
 		 * writebacks MUST be delivered in order or it will
 		 * completely screw up our bookeeping.
 		 */
-		txctrl = rd32(E1000_DCA_TXCTRL(i));
+		txctrl = rd32(E1000_DCA_TXCTRL(j));
 		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
-		wr32(E1000_DCA_TXCTRL(i), txctrl);
+		wr32(E1000_DCA_TXCTRL(j), txctrl);
 	}
 
 
@@ -1771,14 +1817,14 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl;
 	u32 srrctl = 0;
-	int i;
+	int i, j;
 
 	rctl = rd32(E1000_RCTL);
 
 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
 
-	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
-		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
 		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
 	/*
@@ -1788,38 +1834,26 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	 */
 	rctl |= E1000_RCTL_SECRC;
 
-	rctl &= ~E1000_RCTL_SBP;
+	/*
+	 * disable store bad packets, long packet enable, and clear size bits.
+	 */
+	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_LPE | E1000_RCTL_SZ_256);
 
-	if (adapter->netdev->mtu <= ETH_DATA_LEN)
-		rctl &= ~E1000_RCTL_LPE;
-	else
+	if (adapter->netdev->mtu > ETH_DATA_LEN)
 		rctl |= E1000_RCTL_LPE;
-	if (adapter->rx_buffer_len <= IGB_RXBUFFER_2048) {
-		/* Setup buffer sizes */
-		rctl &= ~E1000_RCTL_SZ_4096;
-		rctl |= E1000_RCTL_BSEX;
-		switch (adapter->rx_buffer_len) {
-		case IGB_RXBUFFER_256:
-			rctl |= E1000_RCTL_SZ_256;
-			rctl &= ~E1000_RCTL_BSEX;
-			break;
-		case IGB_RXBUFFER_512:
-			rctl |= E1000_RCTL_SZ_512;
-			rctl &= ~E1000_RCTL_BSEX;
-			break;
-		case IGB_RXBUFFER_1024:
-			rctl |= E1000_RCTL_SZ_1024;
-			rctl &= ~E1000_RCTL_BSEX;
-			break;
-		case IGB_RXBUFFER_2048:
-		default:
-			rctl |= E1000_RCTL_SZ_2048;
-			rctl &= ~E1000_RCTL_BSEX;
-			break;
-		}
-	} else {
-		rctl &= ~E1000_RCTL_BSEX;
-		srrctl = adapter->rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+
+	/* Setup buffer sizes */
+	switch (adapter->rx_buffer_len) {
+	case IGB_RXBUFFER_256:
+		rctl |= E1000_RCTL_SZ_256;
+		break;
+	case IGB_RXBUFFER_512:
+		rctl |= E1000_RCTL_SZ_512;
+		break;
+	default:
+		srrctl = ALIGN(adapter->rx_buffer_len, 1024)
+		         >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+		break;
 	}
 
 	/* 82575 and greater support packet-split where the protocol
@@ -1841,8 +1875,10 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 	}
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		wr32(E1000_SRRCTL(i), srrctl);
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		j = adapter->rx_ring[i].reg_idx;
+		wr32(E1000_SRRCTL(j), srrctl);
+	}
 
 	wr32(E1000_RCTL, rctl);
 }
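
In the default case above the buffer size is programmed straight into SRRCTL's packet-buffer field instead of juggling the RCTL_SZ_*/BSEX bits. A sketch of that computation, assuming E1000_SRRCTL_BSIZEPKT_SHIFT is the field's 1 KB granularity shift of 10:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned int)(a) - 1))
#define BSIZEPKT_SHIFT 10	/* assumed E1000_SRRCTL_BSIZEPKT_SHIFT */

int main(void)
{
	unsigned int lens[] = { 1024, 2048, 4096, 16384 };
	unsigned int i;

	/* BSIZEPKT counts 1 KB units, so round up then shift down */
	for (i = 0; i < 4; i++)
		printf("rx_buffer_len %5u -> BSIZEPKT %2u\n", lens[i],
		       ALIGN(lens[i], 1024) >> BSIZEPKT_SHIFT);
	return 0;
}
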
@@ -1859,7 +1895,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl, rxcsum;
 	u32 rxdctl;
-	int i;
+	int i, j;
 
 	/* disable receives while setting up the descriptors */
 	rctl = rd32(E1000_RCTL);
@@ -1874,25 +1910,26 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	 * the Base and Length of the Rx Descriptor Ring */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
+		j = ring->reg_idx;
 		rdba = ring->dma;
-		wr32(E1000_RDBAL(i),
+		wr32(E1000_RDBAL(j),
 		     rdba & 0x00000000ffffffffULL);
-		wr32(E1000_RDBAH(i), rdba >> 32);
-		wr32(E1000_RDLEN(i),
+		wr32(E1000_RDBAH(j), rdba >> 32);
+		wr32(E1000_RDLEN(j),
 		     ring->count * sizeof(union e1000_adv_rx_desc));
 
-		ring->head = E1000_RDH(i);
-		ring->tail = E1000_RDT(i);
+		ring->head = E1000_RDH(j);
+		ring->tail = E1000_RDT(j);
 		writel(0, hw->hw_addr + ring->tail);
 		writel(0, hw->hw_addr + ring->head);
 
-		rxdctl = rd32(E1000_RXDCTL(i));
+		rxdctl = rd32(E1000_RXDCTL(j));
 		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
 		rxdctl &= 0xFFF00000;
 		rxdctl |= IGB_RX_PTHRESH;
 		rxdctl |= IGB_RX_HTHRESH << 8;
 		rxdctl |= IGB_RX_WTHRESH << 16;
-		wr32(E1000_RXDCTL(i), rxdctl);
+		wr32(E1000_RXDCTL(j), rxdctl);
 #ifdef CONFIG_IGB_LRO
 		/* Intitial LRO Settings */
 		ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
@@ -1922,7 +1959,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 		shift = 6;
 		for (j = 0; j < (32 * 4); j++) {
 			reta.bytes[j & 3] =
-				(j % adapter->num_rx_queues) << shift;
+				adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
 			if ((j & 3) == 3)
 				writel(reta.dword,
 				       hw->hw_addr + E1000_RETA(0) + (j & ~3));
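
The redirection-table write above now distributes reg_idx values, four one-byte entries packed per 32-bit store. A sketch of the packing with an identity reg_idx mapping (the 82575 default) assumed; the union mirrors the driver's local reta variable:

#include <stdio.h>
#include <stdint.h>

union reta {
	uint32_t dword;
	uint8_t bytes[4];
};

int main(void)
{
	int num_rx_queues = 4, shift = 6, j;
	union reta reta = { 0 };

	for (j = 0; j < 8; j++) {	/* the full table runs to 32 * 4 */
		reta.bytes[j & 3] = (uint8_t)((j % num_rx_queues) << shift);
		if ((j & 3) == 3)	/* flush one dword every 4 entries */
			printf("RETA[%d] = 0x%08x\n", j >> 2,
			       (unsigned)reta.dword);
	}
	return 0;
}
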
@@ -1984,7 +2021,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	 *
 	 * Free all transmit software resources
 	 **/
-static void igb_free_tx_resources(struct igb_ring *tx_ring)
+void igb_free_tx_resources(struct igb_ring *tx_ring)
 {
 	struct pci_dev *pdev = tx_ring->adapter->pdev;
 
@@ -2082,7 +2119,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
 	 *
 	 * Free all receive software resources
 	 **/
-static void igb_free_rx_resources(struct igb_ring *rx_ring)
+void igb_free_rx_resources(struct igb_ring *rx_ring)
 {
 	struct pci_dev *pdev = rx_ring->adapter->pdev;
 
@@ -2274,8 +2311,7 @@ static void igb_set_multi(struct net_device *netdev)
 static void igb_update_phy_info(unsigned long data)
 {
 	struct igb_adapter *adapter = (struct igb_adapter *) data;
-	if (adapter->hw.phy.ops.get_phy_info)
-		adapter->hw.phy.ops.get_phy_info(&adapter->hw);
+	igb_get_phy_info(&adapter->hw);
 }
 
 /**
@@ -2330,9 +2366,10 @@ static void igb_watchdog_task(struct work_struct *work)
 					   &adapter->link_duplex);
 
 			ctrl = rd32(E1000_CTRL);
-			dev_info(&adapter->pdev->dev,
-				 "NIC Link is Up %d Mbps %s, "
-				 "Flow Control: %s\n",
+			/* Links status message must follow this format */
+			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
+			       "Flow Control: %s\n",
+			       netdev->name,
 				 adapter->link_speed,
 				 adapter->link_duplex == FULL_DUPLEX ?
 				 "Full Duplex" : "Half Duplex",
@@ -2367,7 +2404,9 @@ static void igb_watchdog_task(struct work_struct *work)
 		if (netif_carrier_ok(netdev)) {
 			adapter->link_speed = 0;
 			adapter->link_duplex = 0;
-			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+			/* Links status message must follow this format */
+			printk(KERN_INFO "igb: %s NIC Link is Down\n",
+			       netdev->name);
 			netif_carrier_off(netdev);
 			netif_tx_stop_all_queues(netdev);
 			if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -2703,6 +2742,7 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
 	context_desc->seqnum_seed = 0;
 
 	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
 	buffer_info->dma = 0;
 	i++;
 	if (i == tx_ring->count)
@@ -2766,6 +2806,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 			cpu_to_le32(tx_ring->queue_index << 4);
 
 	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
 	buffer_info->dma = 0;
 
 	i++;
@@ -2784,8 +2825,8 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 #define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
 
 static inline int igb_tx_map_adv(struct igb_adapter *adapter,
-				 struct igb_ring *tx_ring,
-				 struct sk_buff *skb)
+				 struct igb_ring *tx_ring, struct sk_buff *skb,
+				 unsigned int first)
 {
 	struct igb_buffer *buffer_info;
 	unsigned int len = skb_headlen(skb);
@@ -2799,6 +2840,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 	buffer_info->length = len;
 	/* set time_stamp *before* dma to help avoid a possible race */
 	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
 	buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
 					  PCI_DMA_TODEVICE);
 	count++;
@@ -2816,6 +2858,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
 		buffer_info->length = len;
 		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;
 		buffer_info->dma = pci_map_page(adapter->pdev,
 						frag->page,
 						frag->page_offset,
@@ -2828,8 +2871,9 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 		i = 0;
 	}
 
-	i = (i == 0) ? tx_ring->count - 1 : i - 1;
+	i = ((i == 0) ? tx_ring->count - 1 : i - 1);
 	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[first].next_to_watch = i;
 
 	return count;
 }
@@ -2936,6 +2980,7 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 				   struct igb_ring *tx_ring)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	unsigned int first;
 	unsigned int tx_flags = 0;
 	unsigned int len;
 	u8 hdr_len = 0;
@@ -2972,6 +3017,8 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	if (skb->protocol == htons(ETH_P_IP))
 		tx_flags |= IGB_TX_FLAGS_IPV4;
 
+	first = tx_ring->next_to_use;
+
 	tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
 					    &hdr_len) : 0;
 
@@ -2987,7 +3034,7 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		tx_flags |= IGB_TX_FLAGS_CSUM;
 
 	igb_tx_queue_adv(adapter, tx_ring, tx_flags,
-			 igb_tx_map_adv(adapter, tx_ring, skb),
+			 igb_tx_map_adv(adapter, tx_ring, skb, first),
 			 skb->len, hdr_len);
 
 	netdev->trans_start = jiffies;
@@ -3249,7 +3296,7 @@ void igb_update_stats(struct igb_adapter *adapter)
 	/* Phy Stats */
 	if (hw->phy.media_type == e1000_media_type_copper) {
 		if ((adapter->link_speed == SPEED_1000) &&
-		    (!hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
+		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
 					       &phy_tmp))) {
 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
 			adapter->phy_stats.idle_errors += phy_tmp;
@@ -3332,7 +3379,6 @@ static void igb_write_itr(struct igb_ring *ring)
 static irqreturn_t igb_msix_rx(int irq, void *data)
 {
 	struct igb_ring *rx_ring = data;
-	struct igb_adapter *adapter = rx_ring->adapter;
 
 	/* Write the ITR value calculated at the end of the
 	 * previous interrupt.
@@ -3340,11 +3386,11 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
 
 	igb_write_itr(rx_ring);
 
-	if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
-		__netif_rx_schedule(adapter->netdev, &rx_ring->napi);
+	if (netif_rx_schedule_prep(&rx_ring->napi))
+		__netif_rx_schedule(&rx_ring->napi);
 
 #ifdef CONFIG_IGB_DCA
-	if (adapter->flags & IGB_FLAG_DCA_ENABLED)
+	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
 		igb_update_rx_dca(rx_ring);
 #endif
 	return IRQ_HANDLED;
@@ -3357,7 +3403,7 @@ static void igb_update_rx_dca(struct igb_ring *rx_ring)
 	struct igb_adapter *adapter = rx_ring->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	int cpu = get_cpu();
-	int q = rx_ring - adapter->rx_ring;
+	int q = rx_ring->reg_idx;
 
 	if (rx_ring->cpu != cpu) {
 		dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
@@ -3384,7 +3430,7 @@ static void igb_update_tx_dca(struct igb_ring *tx_ring)
 	struct igb_adapter *adapter = tx_ring->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	int cpu = get_cpu();
-	int q = tx_ring - adapter->tx_ring;
+	int q = tx_ring->reg_idx;
 
 	if (tx_ring->cpu != cpu) {
 		dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
@@ -3493,7 +3539,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+	netif_rx_schedule(&adapter->rx_ring[0].napi);
 
 	return IRQ_HANDLED;
 }
@@ -3531,7 +3577,7 @@ static irqreturn_t igb_intr(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+	netif_rx_schedule(&adapter->rx_ring[0].napi);
 
 	return IRQ_HANDLED;
 }
@@ -3566,7 +3612,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
 	    !netif_running(netdev)) {
 		if (adapter->itr_setting & 3)
 			igb_set_itr(adapter);
-		netif_rx_complete(netdev, napi);
+		netif_rx_complete(napi);
 		if (!test_bit(__IGB_DOWN, &adapter->state))
 			igb_irq_enable(adapter);
 		return 0;
@@ -3592,7 +3638,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
 
 	/* If not enough Rx work done, exit the polling mode */
 	if ((work_done == 0) || !netif_running(netdev)) {
-		netif_rx_complete(netdev, napi);
+		netif_rx_complete(napi);
 
 		if (adapter->itr_setting & 3) {
 			if (adapter->num_rx_queues == 1)
@@ -3610,12 +3656,6 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
 	return 1;
 }
 
-static inline u32 get_head(struct igb_ring *tx_ring)
-{
-	void *end = (struct e1000_tx_desc *)tx_ring->desc + tx_ring->count;
-	return le32_to_cpu(*(volatile __le32 *)end);
-}
-
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
  * @adapter: board private structure
@@ -3624,24 +3664,25 @@ static inline u32 get_head(struct igb_ring *tx_ring)
 static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 {
 	struct igb_adapter *adapter = tx_ring->adapter;
-	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
-	struct e1000_tx_desc *tx_desc;
+	struct e1000_hw *hw = &adapter->hw;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
-	unsigned int i;
-	u32 head, oldhead;
-	unsigned int count = 0;
+	union e1000_adv_tx_desc *tx_desc, *eop_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
-	bool retval = true;
+	unsigned int i, eop, count = 0;
+	bool cleaned = false;
 
-	rmb();
-	head = get_head(tx_ring);
 	i = tx_ring->next_to_clean;
-	while (1) {
-		while (i != head) {
-			tx_desc = E1000_TX_DESC(*tx_ring, i);
+	eop = tx_ring->buffer_info[i].next_to_watch;
+	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
+
+	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+	       (count < tx_ring->count)) {
+		for (cleaned = false; !cleaned; count++) {
+			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
+			cleaned = (i == eop);
 			skb = buffer_info->skb;
 
 			if (skb) {
@@ -3656,25 +3697,17 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 			}
 
 			igb_unmap_and_free_tx_resource(adapter, buffer_info);
+			tx_desc->wb.status = 0;
 
 			i++;
 			if (i == tx_ring->count)
 				i = 0;
-
-			count++;
-			if (count == IGB_MAX_TX_CLEAN) {
-				retval = false;
-				goto done_cleaning;
-			}
 		}
-		oldhead = head;
-		rmb();
-		head = get_head(tx_ring);
-		if (head == oldhead)
-			goto done_cleaning;
-	} /* while (1) */
-
-done_cleaning:
+
+		eop = tx_ring->buffer_info[i].next_to_watch;
+		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
+	}
+
 	tx_ring->next_to_clean = i;
 
 	if (unlikely(count &&
@@ -3701,7 +3734,6 @@ done_cleaning:
 	    && !(rd32(E1000_STATUS) &
 		 E1000_STATUS_TXOFF)) {
 
-		tx_desc = E1000_TX_DESC(*tx_ring, i);
 		/* detected Tx unit hang */
 		dev_err(&adapter->pdev->dev,
 			"Detected Tx Unit Hang\n"
@@ -3710,9 +3742,9 @@ done_cleaning:
 			"  TDT                  <%x>\n"
 			"  next_to_use          <%x>\n"
 			"  next_to_clean        <%x>\n"
-			"  head (WB)            <%x>\n"
 			"buffer_info[next_to_clean]\n"
 			"  time_stamp           <%lx>\n"
+			"  next_to_watch        <%x>\n"
 			"  jiffies              <%lx>\n"
 			"  desc.status          <%x>\n",
 			tx_ring->queue_index,
@@ -3720,10 +3752,10 @@ done_cleaning:
 			readl(adapter->hw.hw_addr + tx_ring->tail),
 			tx_ring->next_to_use,
 			tx_ring->next_to_clean,
-			head,
 			tx_ring->buffer_info[i].time_stamp,
+			eop,
 			jiffies,
-			tx_desc->upper.fields.status);
+			eop_desc->wb.status);
 		netif_stop_subqueue(netdev, tx_ring->queue_index);
 	}
 }
@@ -3733,7 +3765,7 @@ done_cleaning:
 	tx_ring->tx_stats.packets += total_packets;
 	adapter->net_stats.tx_bytes += total_bytes;
 	adapter->net_stats.tx_packets += total_packets;
-	return retval;
+	return (count < tx_ring->count);
 }
 
 #ifdef CONFIG_IGB_LRO
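
The reworked cleaner above drops head write-back polling in favor of the DD (descriptor done) status bit: each packet stamps its last descriptor's index into next_to_watch, and cleaning advances a whole packet at a time while that descriptor reports done. A minimal model of the loop with simplified stand-in types, not driver code:

#include <stdio.h>
#include <stdint.h>

#define STAT_DD   0x1		/* stand-in for E1000_TXD_STAT_DD */
#define RING_SIZE 8

struct desc { uint32_t status; };
struct buf  { unsigned int next_to_watch; };

int main(void)
{
	struct desc ring[RING_SIZE] = { { 0 } };
	struct buf  info[RING_SIZE] = { { 0 } };
	unsigned int i = 0, eop, count = 0;

	/* pretend one 3-descriptor packet (0..2) has been transmitted */
	info[0].next_to_watch = 2;
	ring[2].status = STAT_DD;	/* hardware marks the last descriptor */

	eop = info[i].next_to_watch;
	while ((ring[eop].status & STAT_DD) && count < RING_SIZE) {
		int cleaned = 0;
		for (; !cleaned; count++) {
			cleaned = (i == eop);
			ring[i].status = 0;	/* release the descriptor */
			i = (i + 1) % RING_SIZE;
		}
		eop = info[i].next_to_watch;	/* follow the next packet */
	}
	printf("cleaned %u descriptors, next_to_clean = %u\n", count, i);
	return 0;
}
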
@@ -3919,8 +3951,10 @@ send_up:
 	next_buffer = &rx_ring->buffer_info[i];
 
 	if (!(staterr & E1000_RXD_STAT_EOP)) {
-		buffer_info->skb = xchg(&next_buffer->skb, skb);
-		buffer_info->dma = xchg(&next_buffer->dma, 0);
+		buffer_info->skb = next_buffer->skb;
+		buffer_info->dma = next_buffer->dma;
+		next_buffer->skb = skb;
+		next_buffer->dma = 0;
 		goto next_desc;
 	}
 
@@ -3938,8 +3972,6 @@ send_up:
 
 	igb_receive_skb(rx_ring, staterr, rx_desc, skb);
 
-	netdev->last_rx = jiffies;
-
 next_desc:
 	rx_desc->wb.upper.status_error = 0;
 
@@ -4102,9 +4134,8 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 	case SIOCGMIIREG:
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
-		if (adapter->hw.phy.ops.read_phy_reg(&adapter->hw,
-						     data->reg_num
-						     & 0x1F, &data->val_out))
+		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+				     &data->val_out))
 			return -EIO;
 		break;
 	case SIOCSMIIREG:
@@ -4474,27 +4505,38 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	pci_ers_result_t result;
 	int err;
 
 	if (adapter->need_ioport)
 		err = pci_enable_device(pdev);
 	else
 		err = pci_enable_device_mem(pdev);
+
 	if (err) {
 		dev_err(&pdev->dev,
 			"Cannot re-enable PCI device after reset.\n");
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-	pci_set_master(pdev);
-	pci_restore_state(pdev);
-
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);
-
-	igb_reset(adapter);
-	wr32(E1000_WUS, ~0);
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+
+		igb_reset(adapter);
+		wr32(E1000_WUS, ~0);
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	err = pci_cleanup_aer_uncorrect_error_status(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
+		        "failed 0x%0x\n", err);
+		/* non-fatal, continue */
+	}
 
-	return PCI_ERS_RESULT_RECOVERED;
+	return result;
 }
 
 /**
@@ -4522,7 +4564,6 @@ static void igb_io_resume(struct pci_dev *pdev)
 	/* let the f/w know that the h/w is now under the control of the
 	 * driver. */
 	igb_get_hw_control(adapter);
-
 }
 
 /* igb_main.c */