Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
 -rw-r--r--  drivers/net/e1000/e1000_main.c | 814
 1 file changed, 534 insertions(+), 280 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 438a931fd55d..d0a5d1656c5f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -43,7 +43,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "6.1.16-k2"DRIVERNAPI
+#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
@@ -97,7 +97,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
         INTEL_E1000_ETHERNET_DEVICE(0x108A),
         INTEL_E1000_ETHERNET_DEVICE(0x108B),
         INTEL_E1000_ETHERNET_DEVICE(0x108C),
+        INTEL_E1000_ETHERNET_DEVICE(0x1099),
         INTEL_E1000_ETHERNET_DEVICE(0x109A),
+        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
         /* required last entry */
         {0,}
 };
@@ -171,9 +173,11 @@ static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                        struct e1000_rx_ring *rx_ring);
 #endif
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-                                   struct e1000_rx_ring *rx_ring);
+                                   struct e1000_rx_ring *rx_ring,
+                                   int cleaned_count);
 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-                                      struct e1000_rx_ring *rx_ring);
+                                      struct e1000_rx_ring *rx_ring,
+                                      int cleaned_count);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                            int cmd);
@@ -319,7 +323,75 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
                 }
         }
 }
 
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+
+static inline void
+e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+        uint32_t ctrl_ext;
+        uint32_t swsm;
+
+        /* Let firmware take over control of h/w */
+        switch (adapter->hw.mac_type) {
+        case e1000_82571:
+        case e1000_82572:
+                ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+                E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                                ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+                break;
+        case e1000_82573:
+                swsm = E1000_READ_REG(&adapter->hw, SWSM);
+                E1000_WRITE_REG(&adapter->hw, SWSM,
+                                swsm & ~E1000_SWSM_DRV_LOAD);
+        default:
+                break;
+        }
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+
+static inline void
+e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+        uint32_t ctrl_ext;
+        uint32_t swsm;
+        /* Let firmware know the driver has taken over */
+        switch (adapter->hw.mac_type) {
+        case e1000_82571:
+        case e1000_82572:
+                ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+                E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                                ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+                break;
+        case e1000_82573:
+                swsm = E1000_READ_REG(&adapter->hw, SWSM);
+                E1000_WRITE_REG(&adapter->hw, SWSM,
+                                swsm | E1000_SWSM_DRV_LOAD);
+                break;
+        default:
+                break;
+        }
+}
+
 int
 e1000_up(struct e1000_adapter *adapter)
 {
@@ -343,8 +415,14 @@ e1000_up(struct e1000_adapter *adapter)
         e1000_configure_tx(adapter);
         e1000_setup_rctl(adapter);
         e1000_configure_rx(adapter);
-        for (i = 0; i < adapter->num_queues; i++)
-                adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
+        /* call E1000_DESC_UNUSED which always leaves
+         * at least 1 descriptor unused to make sure
+         * next_to_use != next_to_clean */
+        for (i = 0; i < adapter->num_rx_queues; i++) {
+                struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+                adapter->alloc_rx_buf(adapter, ring,
+                                      E1000_DESC_UNUSED(ring));
+        }
 
 #ifdef CONFIG_PCI_MSI
         if(adapter->hw.mac_type > e1000_82547_rev_2) {
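The E1000_DESC_UNUSED() macro used above does not appear in this diff (it lives in e1000.h). As a minimal standalone sketch of the ring arithmetic it performs, assuming the usual e1000 definition, the following user-space program shows why an "empty" 256-entry ring reports 255 unused descriptors, which is exactly the next_to_use != next_to_clean guarantee the new comment describes:

/* Standalone sketch, not driver code: models the arithmetic an
 * E1000_DESC_UNUSED-style macro performs.  Keeping one slot permanently
 * unused lets next_to_use == next_to_clean mean "empty", never "full". */
#include <stdio.h>

struct ring { int count, next_to_use, next_to_clean; };

/* unused slots, always reserving one so the full/empty cases differ */
static int desc_unused(const struct ring *r)
{
        return ((r->next_to_clean > r->next_to_use) ? 0 : r->count)
                + r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
        struct ring r = { .count = 256, .next_to_use = 0, .next_to_clean = 0 };
        printf("empty ring: %d unused\n", desc_unused(&r));   /* 255, not 256 */
        r.next_to_use = 200; r.next_to_clean = 10;
        printf("in flight:  %d unused\n", desc_unused(&r));   /* 65 */
        return 0;
}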
@@ -364,6 +442,12 @@ e1000_up(struct e1000_adapter *adapter)
                 return err;
         }
 
+#ifdef CONFIG_E1000_MQ
+        e1000_setup_queue_mapping(adapter);
+#endif
+
+        adapter->tx_queue_len = netdev->tx_queue_len;
+
         mod_timer(&adapter->watchdog_timer, jiffies);
 
 #ifdef CONFIG_E1000_NAPI
@@ -378,6 +462,8 @@ void
 e1000_down(struct e1000_adapter *adapter)
 {
         struct net_device *netdev = adapter->netdev;
+        boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
+                                     e1000_check_mng_mode(&adapter->hw);
 
         e1000_irq_disable(adapter);
 #ifdef CONFIG_E1000_MQ
@@ -396,6 +482,7 @@ e1000_down(struct e1000_adapter *adapter)
 #ifdef CONFIG_E1000_NAPI
         netif_poll_disable(netdev);
 #endif
+        netdev->tx_queue_len = adapter->tx_queue_len;
         adapter->link_speed = 0;
         adapter->link_duplex = 0;
         netif_carrier_off(netdev);
@@ -405,12 +492,16 @@ e1000_down(struct e1000_adapter *adapter)
         e1000_clean_all_tx_rings(adapter);
         e1000_clean_all_rx_rings(adapter);
 
-        /* If WoL is not enabled and management mode is not IAMT
-         * Power down the PHY so no link is implied when interface is down */
-        if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+        /* Power down the PHY so no link is implied when interface is down *
+         * The PHY cannot be powered down if any of the following is TRUE *
+         * (a) WoL is enabled
+         * (b) AMT is active
+         * (c) SoL/IDER session is active */
+        if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
            adapter->hw.media_type == e1000_media_type_copper &&
-           !e1000_check_mng_mode(&adapter->hw) &&
-           !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
+           !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
+           !mng_mode_enabled &&
+           !e1000_check_phy_reset_block(&adapter->hw)) {
                 uint16_t mii_reg;
                 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
                 mii_reg |= MII_CR_POWER_DOWN;
@@ -422,10 +513,8 @@
 void
 e1000_reset(struct e1000_adapter *adapter)
 {
-        struct net_device *netdev = adapter->netdev;
         uint32_t pba, manc;
         uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
-        uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
 
         /* Repartition Pba for greater than 9k mtu
          * To take effect CTRL.RST is required.
@@ -449,15 +538,8 @@ e1000_reset(struct e1000_adapter *adapter)
         }
 
         if((adapter->hw.mac_type != e1000_82573) &&
-           (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
+           (adapter->netdev->mtu > E1000_RXBUFFER_8192))
                 pba -= 8; /* allocate more FIFO for Tx */
-                /* send an XOFF when there is enough space in the
-                 * Rx FIFO to hold one extra full size Rx packet
-                 */
-                fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
-                        ETHERNET_FCS_SIZE + 1;
-                fc_low_water_mark = fc_high_water_mark + 8;
-        }
 
 
         if(adapter->hw.mac_type == e1000_82547) {
@@ -471,10 +553,12 @@ e1000_reset(struct e1000_adapter *adapter)
         E1000_WRITE_REG(&adapter->hw, PBA, pba);
 
         /* flow control settings */
-        adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
-                                    fc_high_water_mark;
-        adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
-                                   fc_low_water_mark;
+        /* Set the FC high water mark to 90% of the FIFO size.
+         * Required to clear last 3 LSB */
+        fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+
+        adapter->hw.fc_high_water = fc_high_water_mark;
+        adapter->hw.fc_low_water = fc_high_water_mark - 8;
         adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
         adapter->hw.fc_send_xon = 1;
         adapter->hw.fc = adapter->hw.original_fc;
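The new watermark computation is easy to check by hand. PBA holds the Rx FIFO size in kilobytes, so 90% of the FIFO in bytes is pba * 1024 * 9/10 = pba * 9216/10, and the & 0xFFF8 clears the three low bits the register requires to be zero. A standalone sketch (the 48KB FIFO size is only an example):

/* Standalone sketch, not driver code: the watermark arithmetic above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t pba = 48;                     /* e.g. a 48KB Rx FIFO */
        uint16_t high = ((pba * 9216) / 10) & 0xFFF8;

        printf("fc_high_water = %u bytes\n", high);      /* 44232 */
        printf("fc_low_water  = %u bytes\n", high - 8);  /* 44224 */
        return 0;
}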
@@ -517,8 +601,6 @@ e1000_probe(struct pci_dev *pdev,
         struct net_device *netdev;
         struct e1000_adapter *adapter;
         unsigned long mmio_start, mmio_len;
-        uint32_t ctrl_ext;
-        uint32_t swsm;
 
         static int cards_found = 0;
         int i, err, pci_using_dac;
@@ -712,8 +794,7 @@ e1000_probe(struct pci_dev *pdev,
         case e1000_82546:
         case e1000_82546_rev_3:
         case e1000_82571:
-                if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
-                   && (adapter->hw.media_type == e1000_media_type_copper)) {
+                if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
                         e1000_read_eeprom(&adapter->hw,
                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
                         break;
@@ -727,25 +808,36 @@ e1000_probe(struct pci_dev *pdev,
         if(eeprom_data & eeprom_apme_mask)
                 adapter->wol |= E1000_WUFC_MAG;
 
+        /* print bus type/speed/width info */
+        {
+        struct e1000_hw *hw = &adapter->hw;
+        DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+                 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+                ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+                 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+                 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+                 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+                 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+                ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+                 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+                 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+                 "32-bit"));
+        }
+
+        for (i = 0; i < 6; i++)
+                printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
         /* reset the hardware with the new settings */
         e1000_reset(adapter);
 
-        /* Let firmware know the driver has taken over */
-        switch(adapter->hw.mac_type) {
-        case e1000_82571:
-        case e1000_82572:
-                ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-                E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-                                ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-                break;
-        case e1000_82573:
-                swsm = E1000_READ_REG(&adapter->hw, SWSM);
-                E1000_WRITE_REG(&adapter->hw, SWSM,
-                                swsm | E1000_SWSM_DRV_LOAD);
-                break;
-        default:
-                break;
-        }
+        /* If the controller is 82573 and f/w is AMT, do not set
+         * DRV_LOAD until the interface is up.  For all other cases,
+         * let the f/w know that the h/w is now under the control
+         * of the driver. */
+        if (adapter->hw.mac_type != e1000_82573 ||
+            !e1000_check_mng_mode(&adapter->hw))
+                e1000_get_hw_control(adapter);
 
         strcpy(netdev->name, "eth%d");
         if((err = register_netdev(netdev)))
@@ -782,8 +874,7 @@ e1000_remove(struct pci_dev *pdev)
 {
         struct net_device *netdev = pci_get_drvdata(pdev);
         struct e1000_adapter *adapter = netdev_priv(netdev);
-        uint32_t ctrl_ext;
-        uint32_t manc, swsm;
+        uint32_t manc;
 #ifdef CONFIG_E1000_NAPI
         int i;
 #endif
@@ -799,26 +890,13 @@ e1000_remove(struct pci_dev *pdev)
                 }
         }
 
-        switch(adapter->hw.mac_type) {
-        case e1000_82571:
-        case e1000_82572:
-                ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-                E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-                                ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-                break;
-        case e1000_82573:
-                swsm = E1000_READ_REG(&adapter->hw, SWSM);
-                E1000_WRITE_REG(&adapter->hw, SWSM,
-                                swsm & ~E1000_SWSM_DRV_LOAD);
-                break;
-
-        default:
-                break;
-        }
+        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
+         * would have already happened in close and is redundant. */
+        e1000_release_hw_control(adapter);
 
         unregister_netdev(netdev);
 #ifdef CONFIG_E1000_NAPI
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_rx_queues; i++)
                 __dev_put(&adapter->polling_netdev[i]);
 #endif
 
@@ -923,15 +1001,34 @@ e1000_sw_init(struct e1000_adapter *adapter)
         switch (hw->mac_type) {
         case e1000_82571:
         case e1000_82572:
-                adapter->num_queues = 2;
+                /* These controllers support 2 tx queues, but with a single
+                 * qdisc implementation, multiple tx queues aren't quite as
+                 * interesting.  If we can find a logical way of mapping
+                 * flows to a queue, then perhaps we can up the num_tx_queue
+                 * count back to its default.  Until then, we run the risk of
+                 * terrible performance due to SACK overload. */
+                adapter->num_tx_queues = 1;
+                adapter->num_rx_queues = 2;
                 break;
         default:
-                adapter->num_queues = 1;
+                adapter->num_tx_queues = 1;
+                adapter->num_rx_queues = 1;
                 break;
         }
-        adapter->num_queues = min(adapter->num_queues, num_online_cpus());
+        adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
+        adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
+        DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
+                adapter->num_rx_queues,
+                ((adapter->num_rx_queues == 1)
+                 ? ((num_online_cpus() > 1)
+                        ? "(due to unsupported feature in current adapter)"
+                        : "(due to unsupported system configuration)")
+                 : ""));
+        DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
+                adapter->num_tx_queues);
 #else
-        adapter->num_queues = 1;
+        adapter->num_tx_queues = 1;
+        adapter->num_rx_queues = 1;
 #endif
 
         if (e1000_alloc_queues(adapter)) {
@@ -940,17 +1037,14 @@ e1000_sw_init(struct e1000_adapter *adapter)
         }
 
 #ifdef CONFIG_E1000_NAPI
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_rx_queues; i++) {
                 adapter->polling_netdev[i].priv = adapter;
                 adapter->polling_netdev[i].poll = &e1000_clean;
                 adapter->polling_netdev[i].weight = 64;
                 dev_hold(&adapter->polling_netdev[i]);
                 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
         }
-#endif
-
-#ifdef CONFIG_E1000_MQ
-        e1000_setup_queue_mapping(adapter);
+        spin_lock_init(&adapter->tx_queue_lock);
 #endif
 
         atomic_set(&adapter->irq_sem, 1);
@@ -973,13 +1067,13 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
 {
         int size;
 
-        size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+        size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
         adapter->tx_ring = kmalloc(size, GFP_KERNEL);
         if (!adapter->tx_ring)
                 return -ENOMEM;
         memset(adapter->tx_ring, 0, size);
 
-        size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+        size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
         adapter->rx_ring = kmalloc(size, GFP_KERNEL);
         if (!adapter->rx_ring) {
                 kfree(adapter->tx_ring);
@@ -988,7 +1082,7 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
         memset(adapter->rx_ring, 0, size);
 
 #ifdef CONFIG_E1000_NAPI
-        size = sizeof(struct net_device) * adapter->num_queues;
+        size = sizeof(struct net_device) * adapter->num_rx_queues;
         adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
         if (!adapter->polling_netdev) {
                 kfree(adapter->tx_ring);
@@ -998,6 +1092,14 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
         memset(adapter->polling_netdev, 0, size);
 #endif
 
+#ifdef CONFIG_E1000_MQ
+        adapter->rx_sched_call_data.func = e1000_rx_schedule;
+        adapter->rx_sched_call_data.info = adapter->netdev;
+
+        adapter->cpu_netdev = alloc_percpu(struct net_device *);
+        adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
+#endif
+
         return E1000_SUCCESS;
 }
 
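alloc_percpu()/per_cpu_ptr() give each CPU a private slot holding the tx ring and polling netdev it should use; e1000_setup_queue_mapping() in the next hunk fills those slots in round-robin. As a rough user-space model of that mapping (an array indexed by CPU id stands in for real per-cpu storage; the counts are made up):

/* Standalone sketch, not driver code. */
#include <stdio.h>

#define NR_CPUS       4
#define NUM_TX_QUEUES 2

struct tx_ring { int id; };

int main(void)
{
        struct tx_ring rings[NUM_TX_QUEUES] = { {0}, {1} };
        struct tx_ring *cpu_tx_ring[NR_CPUS];   /* "per-cpu" slots */
        int cpu;

        /* round-robin the rings across cpus, as the driver does
         * with tx_ring[i % num_tx_queues] */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                cpu_tx_ring[cpu] = &rings[cpu % NUM_TX_QUEUES];

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu %d -> tx ring %d\n", cpu, cpu_tx_ring[cpu]->id);
        return 0;
}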
@@ -1017,14 +1119,15 @@ e1000_setup_queue_mapping(struct e1000_adapter *adapter)
         lock_cpu_hotplug();
         i = 0;
         for_each_online_cpu(cpu) {
-                *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues];
+                *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
                 /* This is incomplete because we'd like to assign separate
                  * physical cpus to these netdev polling structures and
                  * avoid saturating a subset of cpus.
                  */
-                if (i < adapter->num_queues) {
+                if (i < adapter->num_rx_queues) {
                         *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
-                        adapter->cpu_for_queue[i] = cpu;
+                        adapter->rx_ring[i].cpu = cpu;
+                        cpu_set(cpu, adapter->cpumask);
                 } else
                         *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
 
@@ -1071,6 +1174,12 @@ e1000_open(struct net_device *netdev)
                 e1000_update_mng_vlan(adapter);
         }
 
+        /* If AMT is enabled, let the firmware know that the network
+         * interface is now open */
+        if (adapter->hw.mac_type == e1000_82573 &&
+            e1000_check_mng_mode(&adapter->hw))
+                e1000_get_hw_control(adapter);
+
         return E1000_SUCCESS;
 
 err_up:
@@ -1109,6 +1218,13 @@ e1000_close(struct net_device *netdev)
                         E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
                 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
         }
+
+        /* If AMT is enabled, let the firmware know that the network
+         * interface is now closed */
+        if (adapter->hw.mac_type == e1000_82573 &&
+            e1000_check_mng_mode(&adapter->hw))
+                e1000_release_hw_control(adapter);
+
         return 0;
 }
 
@@ -1229,7 +1345,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
 {
         int i, err = 0;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_tx_queues; i++) {
                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
                 if (err) {
                         DPRINTK(PROBE, ERR,
@@ -1254,10 +1370,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
         uint64_t tdba;
         struct e1000_hw *hw = &adapter->hw;
         uint32_t tdlen, tctl, tipg, tarc;
+        uint32_t ipgr1, ipgr2;
 
         /* Setup the HW Tx Head and Tail descriptor pointers */
 
-        switch (adapter->num_queues) {
+        switch (adapter->num_tx_queues) {
         case 2:
                 tdba = adapter->tx_ring[1].dma;
                 tdlen = adapter->tx_ring[1].count *
@@ -1287,22 +1404,26 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 
         /* Set the default values for the Tx Inter Packet Gap timer */
 
+        if (hw->media_type == e1000_media_type_fiber ||
+            hw->media_type == e1000_media_type_internal_serdes)
+                tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+        else
+                tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
         switch (hw->mac_type) {
         case e1000_82542_rev2_0:
         case e1000_82542_rev2_1:
                 tipg = DEFAULT_82542_TIPG_IPGT;
-                tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
-                tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+                ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+                ipgr2 = DEFAULT_82542_TIPG_IPGR2;
                 break;
         default:
-                if (hw->media_type == e1000_media_type_fiber ||
-                    hw->media_type == e1000_media_type_internal_serdes)
-                        tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
-                else
-                        tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
-                tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
-                tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+                ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+                ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+                break;
         }
+        tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+        tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
         E1000_WRITE_REG(hw, TIPG, tipg);
 
         /* Set the Tx Interrupt Delay register */
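The restructured TIPG logic picks the IPGT value by media type first, then the IPGR1/IPGR2 pair by MAC type, and composes the register once at the end. A standalone sketch of that composition; the constants are the 82543 copper defaults as recalled from e1000_hw.h (IPGT 8, IPGR1 8, IPGR2 6, shifts 10 and 20) and should be treated as illustrative, not authoritative:

/* Standalone sketch, not driver code: assumed constant values. */
#include <stdio.h>
#include <stdint.h>

#define DEFAULT_82543_TIPG_IPGT_COPPER 8
#define DEFAULT_82543_TIPG_IPGR1       8
#define DEFAULT_82543_TIPG_IPGR2       6
#define E1000_TIPG_IPGR1_SHIFT         10
#define E1000_TIPG_IPGR2_SHIFT         20

int main(void)
{
        uint32_t tipg = DEFAULT_82543_TIPG_IPGT_COPPER;   /* media first */
        uint32_t ipgr1 = DEFAULT_82543_TIPG_IPGR1;        /* then mac type */
        uint32_t ipgr2 = DEFAULT_82543_TIPG_IPGR2;

        tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;          /* shared tail */
        tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
        printf("TIPG = 0x%08x\n", tipg);                  /* 0x00602008 */
        return 0;
}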
@@ -1454,6 +1575,8 @@ setup_rx_desc_die:
 
         rxdr->next_to_clean = 0;
         rxdr->next_to_use = 0;
+        rxdr->rx_skb_top = NULL;
+        rxdr->rx_skb_prev = NULL;
 
         return 0;
 }
@@ -1475,7 +1598,7 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
 {
         int i, err = 0;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_rx_queues; i++) {
                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
                 if (err) {
                         DPRINTK(PROBE, ERR,
@@ -1510,7 +1633,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
                 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
                 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
-        if(adapter->hw.tbi_compatibility_on == 1)
+        if (adapter->hw.mac_type > e1000_82543)
+                rctl |= E1000_RCTL_SECRC;
+
+        if (adapter->hw.tbi_compatibility_on == 1)
                 rctl |= E1000_RCTL_SBP;
         else
                 rctl &= ~E1000_RCTL_SBP;
@@ -1638,16 +1764,21 @@ e1000_configure_rx(struct e1000_adapter *adapter)
         }
 
         if (hw->mac_type >= e1000_82571) {
-                /* Reset delay timers after every interrupt */
                 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+                /* Reset delay timers after every interrupt */
                 ctrl_ext |= E1000_CTRL_EXT_CANC;
+#ifdef CONFIG_E1000_NAPI
+                /* Auto-Mask interrupts upon ICR read. */
+                ctrl_ext |= E1000_CTRL_EXT_IAME;
+#endif
                 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+                E1000_WRITE_REG(hw, IAM, ~0);
                 E1000_WRITE_FLUSH(hw);
         }
 
         /* Setup the HW Rx Head and Tail Descriptor Pointers and
          * the Base and Length of the Rx Descriptor Ring */
-        switch (adapter->num_queues) {
+        switch (adapter->num_rx_queues) {
 #ifdef CONFIG_E1000_MQ
         case 2:
                 rdba = adapter->rx_ring[1].dma;
@@ -1674,7 +1805,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
         }
 
 #ifdef CONFIG_E1000_MQ
-        if (adapter->num_queues > 1) {
+        if (adapter->num_rx_queues > 1) {
                 uint32_t random[10];
 
                 get_random_bytes(&random[0], 40);
@@ -1684,7 +1815,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
                 E1000_WRITE_REG(hw, RSSIM, 0);
         }
 
-        switch (adapter->num_queues) {
+        switch (adapter->num_rx_queues) {
         case 2:
         default:
                 reta = 0x00800080;
@@ -1776,7 +1907,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_tx_queues; i++)
                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
 }
 
@@ -1789,12 +1920,10 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
                                 buffer_info->dma,
                                 buffer_info->length,
                                 PCI_DMA_TODEVICE);
-                buffer_info->dma = 0;
         }
-        if(buffer_info->skb) {
+        if (buffer_info->skb)
                 dev_kfree_skb_any(buffer_info->skb);
-                buffer_info->skb = NULL;
-        }
+        memset(buffer_info, 0, sizeof(struct e1000_buffer));
 }
 
 /**
@@ -1843,7 +1972,7 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_tx_queues; i++)
                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
 }
 
@@ -1887,7 +2016,7 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_rx_queues; i++)
                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
 }
 
@@ -1913,8 +2042,6 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
         for(i = 0; i < rx_ring->count; i++) {
                 buffer_info = &rx_ring->buffer_info[i];
                 if(buffer_info->skb) {
-                        ps_page = &rx_ring->ps_page[i];
-                        ps_page_dma = &rx_ring->ps_page_dma[i];
                         pci_unmap_single(pdev,
                                          buffer_info->dma,
                                          buffer_info->length,
@@ -1922,19 +2049,30 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
 
                         dev_kfree_skb(buffer_info->skb);
                         buffer_info->skb = NULL;
-
-                        for(j = 0; j < adapter->rx_ps_pages; j++) {
-                                if(!ps_page->ps_page[j]) break;
-                                pci_unmap_single(pdev,
-                                                 ps_page_dma->ps_page_dma[j],
-                                                 PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                                ps_page_dma->ps_page_dma[j] = 0;
-                                put_page(ps_page->ps_page[j]);
-                                ps_page->ps_page[j] = NULL;
-                        }
+                }
+                ps_page = &rx_ring->ps_page[i];
+                ps_page_dma = &rx_ring->ps_page_dma[i];
+                for (j = 0; j < adapter->rx_ps_pages; j++) {
+                        if (!ps_page->ps_page[j]) break;
+                        pci_unmap_page(pdev,
+                                       ps_page_dma->ps_page_dma[j],
+                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                        ps_page_dma->ps_page_dma[j] = 0;
+                        put_page(ps_page->ps_page[j]);
+                        ps_page->ps_page[j] = NULL;
                 }
         }
 
+        /* there also may be some cached data in our adapter */
+        if (rx_ring->rx_skb_top) {
+                dev_kfree_skb(rx_ring->rx_skb_top);
+
+                /* rx_skb_prev will be wiped out by rx_skb_top */
+                rx_ring->rx_skb_top = NULL;
+                rx_ring->rx_skb_prev = NULL;
+        }
+
+
         size = sizeof(struct e1000_buffer) * rx_ring->count;
         memset(rx_ring->buffer_info, 0, size);
         size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -1963,7 +2101,7 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_rx_queues; i++)
                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
@@ -2005,7 +2143,9 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
 
         if(netif_running(netdev)) {
                 e1000_configure_rx(adapter);
-                e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
+                /* No need to loop, because 82542 supports only 1 queue */
+                struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+                adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
         }
 }
 
@@ -2204,7 +2344,7 @@ static void
 e1000_watchdog_task(struct e1000_adapter *adapter)
 {
         struct net_device *netdev = adapter->netdev;
-        struct e1000_tx_ring *txdr = &adapter->tx_ring[0];
+        struct e1000_tx_ring *txdr = adapter->tx_ring;
         uint32_t link;
 
         e1000_check_for_link(&adapter->hw);
@@ -2231,6 +2371,21 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
                                adapter->link_duplex == FULL_DUPLEX ?
                                "Full Duplex" : "Half Duplex");
 
+                        /* tweak tx_queue_len according to speed/duplex */
+                        netdev->tx_queue_len = adapter->tx_queue_len;
+                        adapter->tx_timeout_factor = 1;
+                        if (adapter->link_duplex == HALF_DUPLEX) {
+                                switch (adapter->link_speed) {
+                                case SPEED_10:
+                                        netdev->tx_queue_len = 10;
+                                        adapter->tx_timeout_factor = 8;
+                                        break;
+                                case SPEED_100:
+                                        netdev->tx_queue_len = 100;
+                                        break;
+                                }
+                        }
+
                         netif_carrier_on(netdev);
                         netif_wake_queue(netdev);
                         mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
@@ -2263,7 +2418,10 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 
         e1000_update_adaptive(&adapter->hw);
 
-        if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) {
+#ifdef CONFIG_E1000_MQ
+        txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
+#endif
+        if (!netif_carrier_ok(netdev)) {
                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
                         /* We've lost link, so the controller stops DMA,
                          * but we've got queued Tx work that's never going
@@ -2314,6 +2472,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 {
 #ifdef NETIF_F_TSO
         struct e1000_context_desc *context_desc;
+        struct e1000_buffer *buffer_info;
         unsigned int i;
         uint32_t cmd_length = 0;
         uint16_t ipcse = 0, tucse, mss;
@@ -2363,6 +2522,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 
         i = tx_ring->next_to_use;
         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+        buffer_info = &tx_ring->buffer_info[i];
 
         context_desc->lower_setup.ip_fields.ipcss = ipcss;
         context_desc->lower_setup.ip_fields.ipcso = ipcso;
@@ -2374,14 +2534,16 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
         context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
         context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
+        buffer_info->time_stamp = jiffies;
+
         if (++i == tx_ring->count) i = 0;
         tx_ring->next_to_use = i;
 
-        return 1;
+        return TRUE;
         }
 #endif
 
-        return 0;
+        return FALSE;
 }
 
 static inline boolean_t
@@ -2389,6 +2551,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
               struct sk_buff *skb)
 {
         struct e1000_context_desc *context_desc;
+        struct e1000_buffer *buffer_info;
         unsigned int i;
         uint8_t css;
 
@@ -2396,6 +2559,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                 css = skb->h.raw - skb->data;
 
                 i = tx_ring->next_to_use;
+                buffer_info = &tx_ring->buffer_info[i];
                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
                 context_desc->upper_setup.tcp_fields.tucss = css;
@@ -2404,6 +2568,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                 context_desc->tcp_seg_setup.data = 0;
                 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
 
+                buffer_info->time_stamp = jiffies;
+
                 if (unlikely(++i == tx_ring->count)) i = 0;
                 tx_ring->next_to_use = i;
 
@@ -2688,11 +2854,30 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
          * overrun the FIFO, adjust the max buffer len if mss
          * drops. */
         if(mss) {
+                uint8_t hdr_len;
                 max_per_txd = min(mss << 2, max_per_txd);
                 max_txd_pwr = fls(max_per_txd) - 1;
+
+                /* TSO Workaround for 82571/2 Controllers -- if skb->data
+                 * points to just header, pull a few bytes of payload from
+                 * frags into skb->data */
+                hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+                if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) &&
+                    (adapter->hw.mac_type == e1000_82571 ||
+                     adapter->hw.mac_type == e1000_82572)) {
+                        unsigned int pull_size;
+                        pull_size = min((unsigned int)4, skb->data_len);
+                        if (!__pskb_pull_tail(skb, pull_size)) {
+                                printk(KERN_ERR "__pskb_pull_tail failed.\n");
+                                dev_kfree_skb_any(skb);
+                                return -EFAULT;
+                        }
+                        len = skb->len - skb->data_len;
+                }
         }
 
         if((mss) || (skb->ip_summed == CHECKSUM_HW))
+                /* reserve a descriptor for the offload context */
                 count++;
         count++;
 #else
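The workaround's test is worth spelling out: hdr_len is the byte count up to the end of the TCP header, and skb->len - skb->data_len is the linear part of the skb. When the two are equal, skb->data holds exactly the headers and nothing else, and the fix pulls a few payload bytes out of the frags so the linear area does not end precisely on the header boundary. A standalone sketch of that arithmetic, with made-up packet sizes:

/* Standalone sketch, not driver code: illustrative numbers only. */
#include <stdio.h>

int main(void)
{
        unsigned int skb_len = 65226;   /* total bytes in the skb     */
        unsigned int data_len = 65172;  /* bytes living in frag pages */
        unsigned int hdr_len = 54;      /* eth + ip + tcp headers     */

        if (data_len && hdr_len == (skb_len - data_len)) {
                unsigned int pull_size = data_len < 4 ? data_len : 4;
                printf("linear part is headers only; pull %u bytes\n",
                       pull_size);
                printf("new linear length: %u\n",
                       (skb_len - data_len) + pull_size);  /* 58 */
        }
        return 0;
}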
@@ -2726,27 +2911,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         if(adapter->pcix_82544)
                 count += nr_frags;
 
-#ifdef NETIF_F_TSO
-        /* TSO Workaround for 82571/2 Controllers -- if skb->data
-         * points to just header, pull a few bytes of payload from
-         * frags into skb->data */
-        if (skb_shinfo(skb)->tso_size) {
-                uint8_t hdr_len;
-                hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
-                if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
-                    (adapter->hw.mac_type == e1000_82571 ||
-                     adapter->hw.mac_type == e1000_82572)) {
-                        unsigned int pull_size;
-                        pull_size = min((unsigned int)4, skb->data_len);
-                        if (!__pskb_pull_tail(skb, pull_size)) {
-                                printk(KERN_ERR "__pskb_pull_tail failed.\n");
-                                dev_kfree_skb_any(skb);
-                                return -EFAULT;
-                        }
-                }
-        }
-#endif
-
         if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
                 e1000_transfer_dhcp_info(adapter, skb);
 
@@ -2833,6 +2997,7 @@ e1000_tx_timeout_task(struct net_device *netdev)
 {
         struct e1000_adapter *adapter = netdev_priv(netdev);
 
+        adapter->tx_timeout_count++;
         e1000_down(adapter);
         e1000_up(adapter);
 }
@@ -2850,7 +3015,7 @@ e1000_get_stats(struct net_device *netdev)
 {
         struct e1000_adapter *adapter = netdev_priv(netdev);
 
-        e1000_update_stats(adapter);
+        /* only return the current stats */
         return &adapter->net_stats;
 }
 
@@ -2871,50 +3036,51 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
         if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
                 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
-                return -EINVAL;
-        }
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
-        /* might want this to be bigger enum check... */
-        /* 82571 controllers limit jumbo frame size to 10500 bytes */
-        if ((adapter->hw.mac_type == e1000_82571 ||
-             adapter->hw.mac_type == e1000_82572) &&
-            max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-                DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
-                                    "on 82571 and 82572 controllers.\n");
                 return -EINVAL;
         }
 
-        if(adapter->hw.mac_type == e1000_82573 &&
-            max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
-                DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
-                                    "on 82573\n");
-                return -EINVAL;
-        }
-
-        if(adapter->hw.mac_type > e1000_82547_rev_2) {
-                adapter->rx_buffer_len = max_frame;
-                E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
-        } else {
-                if(unlikely((adapter->hw.mac_type < e1000_82543) &&
-                   (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
-                        DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
-                                            "on 82542\n");
-                        return -EINVAL;
-
-                } else {
-                        if(max_frame <= E1000_RXBUFFER_2048) {
-                                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
-                        } else if(max_frame <= E1000_RXBUFFER_4096) {
-                                adapter->rx_buffer_len = E1000_RXBUFFER_4096;
-                        } else if(max_frame <= E1000_RXBUFFER_8192) {
-                                adapter->rx_buffer_len = E1000_RXBUFFER_8192;
-                        } else if(max_frame <= E1000_RXBUFFER_16384) {
-                                adapter->rx_buffer_len = E1000_RXBUFFER_16384;
-                        }
-                }
+        /* Adapter-specific max frame size limits. */
+        switch (adapter->hw.mac_type) {
+        case e1000_82542_rev2_0:
+        case e1000_82542_rev2_1:
+        case e1000_82573:
+                if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+                        DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+                        return -EINVAL;
+                }
+                break;
+        case e1000_82571:
+        case e1000_82572:
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+                if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+                        DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
+                        return -EINVAL;
+                }
                 }
+                break;
+        default:
+                /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+                break;
         }
 
+        /* since the driver code now supports splitting a packet across
+         * multiple descriptors, most of the fifo related limitations on
+         * jumbo frame traffic have gone away.
+         * simply use 2k descriptors for everything.
+         *
+         * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+         * means we reserve 2 more, this pushes us to allocate from the next
+         * larger slab size
+         * i.e. RXBUFFER_2048 --> size-4096 slab */
+
+        /* recent hardware supports 1KB granularity */
+        if (adapter->hw.mac_type > e1000_82547_rev_2) {
+                adapter->rx_buffer_len =
+                    ((max_frame < E1000_RXBUFFER_2048) ?
+                        max_frame : E1000_RXBUFFER_2048);
+                E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
+        } else
+                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+
         netdev->mtu = new_mtu;
 
         if(netif_running(netdev)) {
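E1000_ROUNDUP() is not shown in this diff; assuming the usual round-up-to-multiple definition, the new sizing is easy to check by hand. A standalone sketch (the 1518-byte max frame corresponds to a standard 1500-byte MTU plus 14 bytes of header and 4 bytes of FCS):

/* Standalone sketch, not driver code: assumed macro definition. */
#include <stdio.h>

#define ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))

int main(void)
{
        unsigned int max_frame = 1518;
        unsigned int len = max_frame < 2048 ? max_frame : 2048;

        ROUNDUP(len, 1024);                   /* 1KB granularity */
        printf("rx_buffer_len = %u\n", len);  /* 2048 */
        return 0;
}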
@@ -3037,12 +3203,11 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
         adapter->net_stats.rx_errors = adapter->stats.rxerrc +
                 adapter->stats.crcerrs + adapter->stats.algnerrc +
-                adapter->stats.rlec + adapter->stats.mpc +
-                adapter->stats.cexterr;
+                adapter->stats.rlec + adapter->stats.cexterr;
+        adapter->net_stats.rx_dropped = 0;
         adapter->net_stats.rx_length_errors = adapter->stats.rlec;
         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
         adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-        adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
         adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
 
         /* Tx Errors */
@@ -3110,12 +3275,24 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
         struct e1000_adapter *adapter = netdev_priv(netdev);
         struct e1000_hw *hw = &adapter->hw;
         uint32_t icr = E1000_READ_REG(hw, ICR);
-#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI)
+#ifndef CONFIG_E1000_NAPI
         int i;
+#else
+        /* Interrupt Auto-Mask...upon reading ICR,
+         * interrupts are masked.  No need for the
+         * IMC write, but it does mean we should
+         * account for it ASAP. */
+        if (likely(hw->mac_type >= e1000_82571))
+                atomic_inc(&adapter->irq_sem);
 #endif
 
-        if(unlikely(!icr))
+        if (unlikely(!icr)) {
+#ifdef CONFIG_E1000_NAPI
+                if (hw->mac_type >= e1000_82571)
+                        e1000_irq_enable(adapter);
+#endif
                 return IRQ_NONE;  /* Not our interrupt */
+        }
 
         if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
                 hw->get_link_status = 1;
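The auto-mask accounting deserves a note: irq_sem counts outstanding mask operations and e1000_irq_enable() only unmasks when the count drops to zero, so a hardware auto-mask triggered by the ICR read must be counted just like an explicit IMC write. A standalone sketch of that bookkeeping, modelled on the driver's irq_sem convention as I read it:

/* Standalone sketch, not driver code. */
#include <stdio.h>

static int irq_sem = 1;          /* starts masked, as in e1000_sw_init */

static void irq_enable(void)     /* models e1000_irq_enable */
{
        if (--irq_sem == 0)
                printf("IMS write: interrupts unmasked\n");
}

int main(void)
{
        irq_enable();            /* driver up: 1 -> 0, unmask */

        /* interrupt arrives; the ICR read auto-masks in hardware,
         * so bump the counter to stay balanced with the unmask */
        irq_sem++;
        printf("ICR read: auto-masked, irq_sem=%d\n", irq_sem);

        irq_enable();            /* NAPI poll done: 1 -> 0, unmask */
        return 0;
}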
@@ -3123,19 +3300,19 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3123 | } | 3300 | } |
3124 | 3301 | ||
3125 | #ifdef CONFIG_E1000_NAPI | 3302 | #ifdef CONFIG_E1000_NAPI |
3126 | atomic_inc(&adapter->irq_sem); | 3303 | if (unlikely(hw->mac_type < e1000_82571)) { |
3127 | E1000_WRITE_REG(hw, IMC, ~0); | 3304 | atomic_inc(&adapter->irq_sem); |
3128 | E1000_WRITE_FLUSH(hw); | 3305 | E1000_WRITE_REG(hw, IMC, ~0); |
3306 | E1000_WRITE_FLUSH(hw); | ||
3307 | } | ||
3129 | #ifdef CONFIG_E1000_MQ | 3308 | #ifdef CONFIG_E1000_MQ |
3130 | if (atomic_read(&adapter->rx_sched_call_data.count) == 0) { | 3309 | if (atomic_read(&adapter->rx_sched_call_data.count) == 0) { |
3131 | cpu_set(adapter->cpu_for_queue[0], | 3310 | /* We must setup the cpumask once count == 0 since |
3132 | adapter->rx_sched_call_data.cpumask); | 3311 | * each cpu bit is cleared when the work is done. */ |
3133 | for (i = 1; i < adapter->num_queues; i++) { | 3312 | adapter->rx_sched_call_data.cpumask = adapter->cpumask; |
3134 | cpu_set(adapter->cpu_for_queue[i], | 3313 | atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem); |
3135 | adapter->rx_sched_call_data.cpumask); | 3314 | atomic_set(&adapter->rx_sched_call_data.count, |
3136 | atomic_inc(&adapter->irq_sem); | 3315 | adapter->num_rx_queues); |
3137 | } | ||
3138 | atomic_set(&adapter->rx_sched_call_data.count, i); | ||
3139 | smp_call_async_mask(&adapter->rx_sched_call_data); | 3316 | smp_call_async_mask(&adapter->rx_sched_call_data); |
3140 | } else { | 3317 | } else { |
3141 | printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count)); | 3318 | printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count)); |
@@ -3187,7 +3364,7 @@ e1000_clean(struct net_device *poll_dev, int *budget) | |||
3187 | { | 3364 | { |
3188 | struct e1000_adapter *adapter; | 3365 | struct e1000_adapter *adapter; |
3189 | int work_to_do = min(*budget, poll_dev->quota); | 3366 | int work_to_do = min(*budget, poll_dev->quota); |
3190 | int tx_cleaned, i = 0, work_done = 0; | 3367 | int tx_cleaned = 0, i = 0, work_done = 0; |
3191 | 3368 | ||
3192 | /* Must NOT use netdev_priv macro here. */ | 3369 | /* Must NOT use netdev_priv macro here. */ |
3193 | adapter = poll_dev->priv; | 3370 | adapter = poll_dev->priv; |
@@ -3198,11 +3375,23 @@ e1000_clean(struct net_device *poll_dev, int *budget) | |||
3198 | 3375 | ||
3199 | while (poll_dev != &adapter->polling_netdev[i]) { | 3376 | while (poll_dev != &adapter->polling_netdev[i]) { |
3200 | i++; | 3377 | i++; |
3201 | if (unlikely(i == adapter->num_queues)) | 3378 | if (unlikely(i == adapter->num_rx_queues)) |
3202 | BUG(); | 3379 | BUG(); |
3203 | } | 3380 | } |
3204 | 3381 | ||
3205 | tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); | 3382 | if (likely(adapter->num_tx_queues == 1)) { |
3383 | /* e1000_clean is called per-cpu. This lock protects | ||
3384 | * tx_ring[0] from being cleaned by multiple cpus | ||
3385 | * simultaneously. A failure obtaining the lock means | ||
3386 | * tx_ring[0] is currently being cleaned anyway. */ | ||
3387 | if (spin_trylock(&adapter->tx_queue_lock)) { | ||
3388 | tx_cleaned = e1000_clean_tx_irq(adapter, | ||
3389 | &adapter->tx_ring[0]); | ||
3390 | spin_unlock(&adapter->tx_queue_lock); | ||
3391 | } | ||
3392 | } else | ||
3393 | tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); | ||
3394 | |||
3206 | adapter->clean_rx(adapter, &adapter->rx_ring[i], | 3395 | adapter->clean_rx(adapter, &adapter->rx_ring[i], |
3207 | &work_done, work_to_do); | 3396 | &work_done, work_to_do); |
3208 | 3397 | ||
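e1000_clean() runs per CPU, so with a single tx ring the spin_trylock() above acts as an ownership test rather than a wait: losing the race just means another CPU is already cleaning tx_ring[0], which serves this CPU equally well. A sketch of the pattern with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t tx_queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static int clean_passes;

    /* stand-in for e1000_clean_tx_irq() on tx_ring[0] */
    static void clean_tx_ring0(void)
    {
        clean_passes++;
    }

    static void poll_one_cpu(void)
    {
        /* a failed trylock means some other cpu is cleaning ring 0 right
         * now, which is just as good as cleaning it here */
        if (pthread_mutex_trylock(&tx_queue_lock) == 0) {
            clean_tx_ring0();
            pthread_mutex_unlock(&tx_queue_lock);
        }
    }

    int main(void)                      /* build with -lpthread */
    {
        poll_one_cpu();
        printf("clean passes: %d\n", clean_passes);
        return 0;
    }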
@@ -3247,17 +3436,19 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3247 | buffer_info = &tx_ring->buffer_info[i]; | 3436 | buffer_info = &tx_ring->buffer_info[i]; |
3248 | cleaned = (i == eop); | 3437 | cleaned = (i == eop); |
3249 | 3438 | ||
3439 | #ifdef CONFIG_E1000_MQ | ||
3440 | tx_ring->tx_stats.bytes += buffer_info->length; | ||
3441 | #endif | ||
3250 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 3442 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
3251 | 3443 | memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); | |
3252 | tx_desc->buffer_addr = 0; | ||
3253 | tx_desc->lower.data = 0; | ||
3254 | tx_desc->upper.data = 0; | ||
3255 | 3444 | ||
3256 | if(unlikely(++i == tx_ring->count)) i = 0; | 3445 | if(unlikely(++i == tx_ring->count)) i = 0; |
3257 | } | 3446 | } |
3258 | 3447 | ||
3259 | tx_ring->pkt++; | 3448 | #ifdef CONFIG_E1000_MQ |
3260 | 3449 | tx_ring->tx_stats.packets++; | |
3450 | #endif | ||
3451 | |||
3261 | eop = tx_ring->buffer_info[i].next_to_watch; | 3452 | eop = tx_ring->buffer_info[i].next_to_watch; |
3262 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 3453 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
3263 | } | 3454 | } |
@@ -3276,32 +3467,31 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3276 | /* Detect a transmit hang in hardware, this serializes the | 3467 | /* Detect a transmit hang in hardware, this serializes the |
3277 | * check with the clearing of time_stamp and movement of i */ | 3468 | * check with the clearing of time_stamp and movement of i */ |
3278 | adapter->detect_tx_hung = FALSE; | 3469 | adapter->detect_tx_hung = FALSE; |
3279 | if (tx_ring->buffer_info[i].dma && | 3470 | if (tx_ring->buffer_info[eop].dma && |
3280 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) | 3471 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + |
3472 | adapter->tx_timeout_factor * HZ) | ||
3281 | && !(E1000_READ_REG(&adapter->hw, STATUS) & | 3473 | && !(E1000_READ_REG(&adapter->hw, STATUS) & |
3282 | E1000_STATUS_TXOFF)) { | 3474 | E1000_STATUS_TXOFF)) { |
3283 | 3475 | ||
3284 | /* detected Tx unit hang */ | 3476 | /* detected Tx unit hang */ |
3285 | i = tx_ring->next_to_clean; | ||
3286 | eop = tx_ring->buffer_info[i].next_to_watch; | ||
3287 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | ||
3288 | DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" | 3477 | DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" |
3478 | " Tx Queue <%lu>\n" | ||
3289 | " TDH <%x>\n" | 3479 | " TDH <%x>\n" |
3290 | " TDT <%x>\n" | 3480 | " TDT <%x>\n" |
3291 | " next_to_use <%x>\n" | 3481 | " next_to_use <%x>\n" |
3292 | " next_to_clean <%x>\n" | 3482 | " next_to_clean <%x>\n" |
3293 | "buffer_info[next_to_clean]\n" | 3483 | "buffer_info[next_to_clean]\n" |
3294 | " dma <%llx>\n" | ||
3295 | " time_stamp <%lx>\n" | 3484 | " time_stamp <%lx>\n" |
3296 | " next_to_watch <%x>\n" | 3485 | " next_to_watch <%x>\n" |
3297 | " jiffies <%lx>\n" | 3486 | " jiffies <%lx>\n" |
3298 | " next_to_watch.status <%x>\n", | 3487 | " next_to_watch.status <%x>\n", |
3488 | (unsigned long)(tx_ring - | ||
3489 | adapter->tx_ring), | ||
3299 | readl(adapter->hw.hw_addr + tx_ring->tdh), | 3490 | readl(adapter->hw.hw_addr + tx_ring->tdh), |
3300 | readl(adapter->hw.hw_addr + tx_ring->tdt), | 3491 | readl(adapter->hw.hw_addr + tx_ring->tdt), |
3301 | tx_ring->next_to_use, | 3492 | tx_ring->next_to_use, |
3302 | i, | 3493 | tx_ring->next_to_clean, |
3303 | (unsigned long long)tx_ring->buffer_info[i].dma, | 3494 | tx_ring->buffer_info[eop].time_stamp, |
3304 | tx_ring->buffer_info[i].time_stamp, | ||
3305 | eop, | 3495 | eop, |
3306 | jiffies, | 3496 | jiffies, |
3307 | eop_desc->upper.fields.status); | 3497 | eop_desc->upper.fields.status); |
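The hang check now watches the eop descriptor's buffer (the one the hardware must complete next) and scales the timeout by tx_timeout_factor, presumably so slower links get proportionally more slack, while E1000_STATUS_TXOFF exempts flow-control pauses. A compressed sketch of the predicate (plain subtraction here; the driver's time_after() is jiffies-wrap safe):

    #include <stdbool.h>
    #include <stdio.h>

    #define HZ 1000

    /* just the two fields the hang predicate reads */
    struct tx_buf {
        unsigned long dma;              /* nonzero: descriptor still mapped */
        unsigned long time_stamp;       /* jiffies when it was queued */
    };

    static bool tx_hung(const struct tx_buf *eop_buf, unsigned long now,
                        unsigned int tx_timeout_factor, bool txoff)
    {
        return eop_buf->dma &&                            /* work outstanding */
               now - eop_buf->time_stamp >
                       tx_timeout_factor * HZ &&          /* stale too long */
               !txoff;                                    /* not just paused */
    }

    int main(void)
    {
        struct tx_buf eop = { .dma = 0x1000, .time_stamp = 0 };
        printf("%d\n", tx_hung(&eop, 3 * HZ, 2, false));  /* 1: hang */
        printf("%d\n", tx_hung(&eop, 3 * HZ, 2, true));   /* 0: paused */
        return 0;
    }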
@@ -3386,20 +3576,23 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3386 | uint32_t length; | 3576 | uint32_t length; |
3387 | uint8_t last_byte; | 3577 | uint8_t last_byte; |
3388 | unsigned int i; | 3578 | unsigned int i; |
3389 | boolean_t cleaned = FALSE; | 3579 | int cleaned_count = 0; |
3580 | boolean_t cleaned = FALSE, multi_descriptor = FALSE; | ||
3390 | 3581 | ||
3391 | i = rx_ring->next_to_clean; | 3582 | i = rx_ring->next_to_clean; |
3392 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 3583 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
3393 | 3584 | ||
3394 | while(rx_desc->status & E1000_RXD_STAT_DD) { | 3585 | while(rx_desc->status & E1000_RXD_STAT_DD) { |
3395 | buffer_info = &rx_ring->buffer_info[i]; | 3586 | buffer_info = &rx_ring->buffer_info[i]; |
3587 | u8 status; | ||
3396 | #ifdef CONFIG_E1000_NAPI | 3588 | #ifdef CONFIG_E1000_NAPI |
3397 | if(*work_done >= work_to_do) | 3589 | if(*work_done >= work_to_do) |
3398 | break; | 3590 | break; |
3399 | (*work_done)++; | 3591 | (*work_done)++; |
3400 | #endif | 3592 | #endif |
3593 | status = rx_desc->status; | ||
3401 | cleaned = TRUE; | 3594 | cleaned = TRUE; |
3402 | 3595 | cleaned_count++; | |
3403 | pci_unmap_single(pdev, | 3596 | pci_unmap_single(pdev, |
3404 | buffer_info->dma, | 3597 | buffer_info->dma, |
3405 | buffer_info->length, | 3598 | buffer_info->length, |
@@ -3433,18 +3626,40 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3433 | } | 3626 | } |
3434 | } | 3627 | } |
3435 | 3628 | ||
3436 | /* Good Receive */ | 3629 | /* code added for copybreak, this should improve |
3437 | skb_put(skb, length - ETHERNET_FCS_SIZE); | 3630 | * performance for small packets with large amounts |
3631 | * of reassembly being done in the stack */ | ||
3632 | #define E1000_CB_LENGTH 256 | ||
3633 | if ((length < E1000_CB_LENGTH) && | ||
3634 | !rx_ring->rx_skb_top && | ||
3635 | /* or maybe (status & E1000_RXD_STAT_EOP) && */ | ||
3636 | !multi_descriptor) { | ||
3637 | struct sk_buff *new_skb = | ||
3638 | dev_alloc_skb(length + NET_IP_ALIGN); | ||
3639 | if (new_skb) { | ||
3640 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
3641 | new_skb->dev = netdev; | ||
3642 | memcpy(new_skb->data - NET_IP_ALIGN, | ||
3643 | skb->data - NET_IP_ALIGN, | ||
3644 | length + NET_IP_ALIGN); | ||
3645 | /* save the skb in buffer_info as good */ | ||
3646 | buffer_info->skb = skb; | ||
3647 | skb = new_skb; | ||
3648 | skb_put(skb, length); | ||
3649 | } | ||
3650 | } | ||
3651 | |||
3652 | /* end copybreak code */ | ||
3438 | 3653 | ||
3439 | /* Receive Checksum Offload */ | 3654 | /* Receive Checksum Offload */ |
3440 | e1000_rx_checksum(adapter, | 3655 | e1000_rx_checksum(adapter, |
3441 | (uint32_t)(rx_desc->status) | | 3656 | (uint32_t)(status) | |
3442 | ((uint32_t)(rx_desc->errors) << 24), | 3657 | ((uint32_t)(rx_desc->errors) << 24), |
3443 | rx_desc->csum, skb); | 3658 | rx_desc->csum, skb); |
3444 | skb->protocol = eth_type_trans(skb, netdev); | 3659 | skb->protocol = eth_type_trans(skb, netdev); |
3445 | #ifdef CONFIG_E1000_NAPI | 3660 | #ifdef CONFIG_E1000_NAPI |
3446 | if(unlikely(adapter->vlgrp && | 3661 | if(unlikely(adapter->vlgrp && |
3447 | (rx_desc->status & E1000_RXD_STAT_VP))) { | 3662 | (status & E1000_RXD_STAT_VP))) { |
3448 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | 3663 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, |
3449 | le16_to_cpu(rx_desc->special) & | 3664 | le16_to_cpu(rx_desc->special) & |
3450 | E1000_RXD_SPC_VLAN_MASK); | 3665 | E1000_RXD_SPC_VLAN_MASK); |
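The copybreak block in this hunk trades one memcpy for less allocator and reassembly pressure: frames under E1000_CB_LENGTH (256) bytes are copied into a freshly allocated, right-sized skb, and the original full-sized buffer is parked in buffer_info for immediate reuse. A self-contained sketch with malloc standing in for skb allocation:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define CB_LENGTH 256               /* copybreak threshold from the diff */
    #define RX_BUF_LEN 2048

    /* return the buffer to hand up the stack; *recycle is set when the
     * original ring buffer can be remapped for the hardware as-is */
    static unsigned char *rx_frame(unsigned char *rx_buf, size_t len, int *recycle)
    {
        if (len < CB_LENGTH) {
            unsigned char *copy = malloc(len);
            if (copy) {
                memcpy(copy, rx_buf, len);   /* small frame: one cheap copy */
                *recycle = 1;                /* ring keeps the big buffer */
                return copy;
            }
        }
        *recycle = 0;                        /* large frame: hand the buffer up */
        return rx_buf;
    }

    int main(void)
    {
        unsigned char *buf = calloc(1, RX_BUF_LEN);
        int recycle;
        unsigned char *up = rx_frame(buf, 60, &recycle);
        printf("recycled=%d copied=%d\n", recycle, up != buf);
        if (up != buf)
            free(up);
        free(buf);
        return 0;
    }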
@@ -3462,17 +3677,26 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3462 | } | 3677 | } |
3463 | #endif /* CONFIG_E1000_NAPI */ | 3678 | #endif /* CONFIG_E1000_NAPI */ |
3464 | netdev->last_rx = jiffies; | 3679 | netdev->last_rx = jiffies; |
3465 | rx_ring->pkt++; | 3680 | #ifdef CONFIG_E1000_MQ |
3681 | rx_ring->rx_stats.packets++; | ||
3682 | rx_ring->rx_stats.bytes += length; | ||
3683 | #endif | ||
3466 | 3684 | ||
3467 | next_desc: | 3685 | next_desc: |
3468 | rx_desc->status = 0; | 3686 | rx_desc->status = 0; |
3469 | buffer_info->skb = NULL; | ||
3470 | if(unlikely(++i == rx_ring->count)) i = 0; | ||
3471 | 3687 | ||
3472 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 3688 | /* return some buffers to hardware, one at a time is too slow */ |
3689 | if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { | ||
3690 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); | ||
3691 | cleaned_count = 0; | ||
3692 | } | ||
3693 | |||
3473 | } | 3694 | } |
3474 | rx_ring->next_to_clean = i; | 3695 | rx_ring->next_to_clean = i; |
3475 | adapter->alloc_rx_buf(adapter, rx_ring); | 3696 | |
3697 | cleaned_count = E1000_DESC_UNUSED(rx_ring); | ||
3698 | if (cleaned_count) | ||
3699 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); | ||
3476 | 3700 | ||
3477 | return cleaned; | 3701 | return cleaned; |
3478 | } | 3702 | } |
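Both cleanup paths now return buffers to the ring in bursts of E1000_RX_BUFFER_WRITE, plus one final top-up for the remainder, so the RDT tail update (an MMIO write) is paid per burst instead of per frame. A sketch of the batching arithmetic (refill() is a stand-in for adapter->alloc_rx_buf; the burst size of 16 is illustrative):

    #include <stdio.h>

    #define RX_BUFFER_WRITE 16          /* illustrative burst size */

    static int doorbell_writes;

    /* stand-in for adapter->alloc_rx_buf(): map n buffers, write RDT once */
    static void refill(int n)
    {
        (void)n;                        /* real code would map n buffers here */
        doorbell_writes++;              /* one MMIO tail write per burst */
    }

    int main(void)
    {
        int cleaned_count = 0;

        for (int frame = 0; frame < 100; frame++)     /* clean 100 frames */
            if (++cleaned_count >= RX_BUFFER_WRITE) {
                refill(cleaned_count);
                cleaned_count = 0;
            }

        if (cleaned_count)                            /* final partial burst */
            refill(cleaned_count);

        printf("doorbell writes: %d\n", doorbell_writes);  /* 7, not 100 */
        return 0;
    }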
@@ -3501,6 +3725,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3501 | struct sk_buff *skb; | 3725 | struct sk_buff *skb; |
3502 | unsigned int i, j; | 3726 | unsigned int i, j; |
3503 | uint32_t length, staterr; | 3727 | uint32_t length, staterr; |
3728 | int cleaned_count = 0; | ||
3504 | boolean_t cleaned = FALSE; | 3729 | boolean_t cleaned = FALSE; |
3505 | 3730 | ||
3506 | i = rx_ring->next_to_clean; | 3731 | i = rx_ring->next_to_clean; |
@@ -3517,6 +3742,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3517 | (*work_done)++; | 3742 | (*work_done)++; |
3518 | #endif | 3743 | #endif |
3519 | cleaned = TRUE; | 3744 | cleaned = TRUE; |
3745 | cleaned_count++; | ||
3520 | pci_unmap_single(pdev, buffer_info->dma, | 3746 | pci_unmap_single(pdev, buffer_info->dma, |
3521 | buffer_info->length, | 3747 | buffer_info->length, |
3522 | PCI_DMA_FROMDEVICE); | 3748 | PCI_DMA_FROMDEVICE); |
@@ -3593,18 +3819,28 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3593 | } | 3819 | } |
3594 | #endif /* CONFIG_E1000_NAPI */ | 3820 | #endif /* CONFIG_E1000_NAPI */ |
3595 | netdev->last_rx = jiffies; | 3821 | netdev->last_rx = jiffies; |
3596 | rx_ring->pkt++; | 3822 | #ifdef CONFIG_E1000_MQ |
3823 | rx_ring->rx_stats.packets++; | ||
3824 | rx_ring->rx_stats.bytes += length; | ||
3825 | #endif | ||
3597 | 3826 | ||
3598 | next_desc: | 3827 | next_desc: |
3599 | rx_desc->wb.middle.status_error &= ~0xFF; | 3828 | rx_desc->wb.middle.status_error &= ~0xFF; |
3600 | buffer_info->skb = NULL; | 3829 | buffer_info->skb = NULL; |
3601 | if(unlikely(++i == rx_ring->count)) i = 0; | ||
3602 | 3830 | ||
3603 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 3831 | /* return some buffers to hardware, one at a time is too slow */ |
3832 | if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { | ||
3833 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); | ||
3834 | cleaned_count = 0; | ||
3835 | } | ||
3836 | |||
3604 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); | 3837 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); |
3605 | } | 3838 | } |
3606 | rx_ring->next_to_clean = i; | 3839 | rx_ring->next_to_clean = i; |
3607 | adapter->alloc_rx_buf(adapter, rx_ring); | 3840 | |
3841 | cleaned_count = E1000_DESC_UNUSED(rx_ring); | ||
3842 | if (cleaned_count) | ||
3843 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); | ||
3608 | 3844 | ||
3609 | return cleaned; | 3845 | return cleaned; |
3610 | } | 3846 | } |
@@ -3616,7 +3852,8 @@ next_desc: | |||
3616 | 3852 | ||
3617 | static void | 3853 | static void |
3618 | e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | 3854 | e1000_alloc_rx_buffers(struct e1000_adapter *adapter, |
3619 | struct e1000_rx_ring *rx_ring) | 3855 | struct e1000_rx_ring *rx_ring, |
3856 | int cleaned_count) | ||
3620 | { | 3857 | { |
3621 | struct net_device *netdev = adapter->netdev; | 3858 | struct net_device *netdev = adapter->netdev; |
3622 | struct pci_dev *pdev = adapter->pdev; | 3859 | struct pci_dev *pdev = adapter->pdev; |
@@ -3629,11 +3866,18 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
3629 | i = rx_ring->next_to_use; | 3866 | i = rx_ring->next_to_use; |
3630 | buffer_info = &rx_ring->buffer_info[i]; | 3867 | buffer_info = &rx_ring->buffer_info[i]; |
3631 | 3868 | ||
3632 | while(!buffer_info->skb) { | 3869 | while (cleaned_count--) { |
3633 | skb = dev_alloc_skb(bufsz); | 3870 | if (!(skb = buffer_info->skb)) |
3871 | skb = dev_alloc_skb(bufsz); | ||
3872 | else { | ||
3873 | skb_trim(skb, 0); | ||
3874 | goto map_skb; | ||
3875 | } | ||
3876 | |||
3634 | 3877 | ||
3635 | if(unlikely(!skb)) { | 3878 | if(unlikely(!skb)) { |
3636 | /* Better luck next round */ | 3879 | /* Better luck next round */ |
3880 | adapter->alloc_rx_buff_failed++; | ||
3637 | break; | 3881 | break; |
3638 | } | 3882 | } |
3639 | 3883 | ||
@@ -3670,6 +3914,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
3670 | 3914 | ||
3671 | buffer_info->skb = skb; | 3915 | buffer_info->skb = skb; |
3672 | buffer_info->length = adapter->rx_buffer_len; | 3916 | buffer_info->length = adapter->rx_buffer_len; |
3917 | map_skb: | ||
3673 | buffer_info->dma = pci_map_single(pdev, | 3918 | buffer_info->dma = pci_map_single(pdev, |
3674 | skb->data, | 3919 | skb->data, |
3675 | adapter->rx_buffer_len, | 3920 | adapter->rx_buffer_len, |
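With copybreak parking skbs in buffer_info, the allocator above first reuses any parked skb, trimming it to zero length and jumping straight to the DMA map via the new map_skb label; a fresh dev_alloc_skb() happens only when nothing was recycled. A minimal model of that reuse path (malloc stands in for skb allocation):

    #include <stdio.h>
    #include <stdlib.h>

    struct buffer_info {
        void *skb;                      /* parked by copybreak, if any */
    };

    static int allocs;

    /* stand-in for the alloc loop body: prefer a parked skb over malloc */
    static void *get_rx_skb(struct buffer_info *bi, size_t bufsz)
    {
        void *skb = bi->skb;

        if (!skb) {                     /* nothing parked: allocate fresh */
            skb = malloc(bufsz);
            allocs++;
        }
        /* the skb_trim(skb, 0) equivalent would reset the length here;
         * either way the caller goes straight to the DMA mapping */
        return skb;
    }

    int main(void)
    {
        struct buffer_info bi = { 0 };
        void *first = get_rx_skb(&bi, 2048);   /* allocates */
        bi.skb = first;                        /* copybreak parked it */
        void *second = get_rx_skb(&bi, 2048);  /* recycled: no allocation */
        printf("allocs=%d recycled=%d\n", allocs, first == second);
        free(first);
        return 0;
    }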
@@ -3718,7 +3963,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
3718 | 3963 | ||
3719 | static void | 3964 | static void |
3720 | e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | 3965 | e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, |
3721 | struct e1000_rx_ring *rx_ring) | 3966 | struct e1000_rx_ring *rx_ring, |
3967 | int cleaned_count) | ||
3722 | { | 3968 | { |
3723 | struct net_device *netdev = adapter->netdev; | 3969 | struct net_device *netdev = adapter->netdev; |
3724 | struct pci_dev *pdev = adapter->pdev; | 3970 | struct pci_dev *pdev = adapter->pdev; |
@@ -3734,7 +3980,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
3734 | ps_page = &rx_ring->ps_page[i]; | 3980 | ps_page = &rx_ring->ps_page[i]; |
3735 | ps_page_dma = &rx_ring->ps_page_dma[i]; | 3981 | ps_page_dma = &rx_ring->ps_page_dma[i]; |
3736 | 3982 | ||
3737 | while(!buffer_info->skb) { | 3983 | while (cleaned_count--) { |
3738 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 3984 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
3739 | 3985 | ||
3740 | for(j = 0; j < PS_PAGE_BUFFERS; j++) { | 3986 | for(j = 0; j < PS_PAGE_BUFFERS; j++) { |
@@ -4106,8 +4352,12 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) | |||
4106 | 4352 | ||
4107 | if((adapter->hw.mng_cookie.status & | 4353 | if((adapter->hw.mng_cookie.status & |
4108 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 4354 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
4109 | (vid == adapter->mng_vlan_id)) | 4355 | (vid == adapter->mng_vlan_id)) { |
4356 | /* release control to f/w */ | ||
4357 | e1000_release_hw_control(adapter); | ||
4110 | return; | 4358 | return; |
4359 | } | ||
4360 | |||
4111 | /* remove VID from filter table */ | 4361 | /* remove VID from filter table */ |
4112 | index = (vid >> 5) & 0x7F; | 4362 | index = (vid >> 5) & 0x7F; |
4113 | vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); | 4363 | vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); |
@@ -4173,8 +4423,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4173 | { | 4423 | { |
4174 | struct net_device *netdev = pci_get_drvdata(pdev); | 4424 | struct net_device *netdev = pci_get_drvdata(pdev); |
4175 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4425 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4176 | uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm; | 4426 | uint32_t ctrl, ctrl_ext, rctl, manc, status; |
4177 | uint32_t wufc = adapter->wol; | 4427 | uint32_t wufc = adapter->wol; |
4428 | int retval = 0; | ||
4178 | 4429 | ||
4179 | netif_device_detach(netdev); | 4430 | netif_device_detach(netdev); |
4180 | 4431 | ||
@@ -4220,13 +4471,21 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4220 | 4471 | ||
4221 | E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); | 4472 | E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); |
4222 | E1000_WRITE_REG(&adapter->hw, WUFC, wufc); | 4473 | E1000_WRITE_REG(&adapter->hw, WUFC, wufc); |
4223 | pci_enable_wake(pdev, 3, 1); | 4474 | retval = pci_enable_wake(pdev, PCI_D3hot, 1); |
4224 | pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ | 4475 | if (retval) |
4476 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | ||
4477 | retval = pci_enable_wake(pdev, PCI_D3cold, 1); | ||
4478 | if (retval) | ||
4479 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | ||
4225 | } else { | 4480 | } else { |
4226 | E1000_WRITE_REG(&adapter->hw, WUC, 0); | 4481 | E1000_WRITE_REG(&adapter->hw, WUC, 0); |
4227 | E1000_WRITE_REG(&adapter->hw, WUFC, 0); | 4482 | E1000_WRITE_REG(&adapter->hw, WUFC, 0); |
4228 | pci_enable_wake(pdev, 3, 0); | 4483 | retval = pci_enable_wake(pdev, PCI_D3hot, 0); |
4229 | pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ | 4484 | if (retval) |
4485 | DPRINTK(PROBE, ERR, "Error disabling D3 wake\n"); | ||
4486 | retval = pci_enable_wake(pdev, PCI_D3cold, 0); | ||
4487 | if (retval) | ||
4488 | DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n"); | ||
4230 | } | 4489 | } |
4231 | 4490 | ||
4232 | pci_save_state(pdev); | 4491 | pci_save_state(pdev); |
@@ -4237,29 +4496,24 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4237 | if(manc & E1000_MANC_SMBUS_EN) { | 4496 | if(manc & E1000_MANC_SMBUS_EN) { |
4238 | manc |= E1000_MANC_ARP_EN; | 4497 | manc |= E1000_MANC_ARP_EN; |
4239 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 4498 | E1000_WRITE_REG(&adapter->hw, MANC, manc); |
4240 | pci_enable_wake(pdev, 3, 1); | 4499 | retval = pci_enable_wake(pdev, PCI_D3hot, 1); |
4241 | pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ | 4500 | if (retval) |
4501 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | ||
4502 | retval = pci_enable_wake(pdev, PCI_D3cold, 1); | ||
4503 | if (retval) | ||
4504 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | ||
4242 | } | 4505 | } |
4243 | } | 4506 | } |
4244 | 4507 | ||
4245 | switch(adapter->hw.mac_type) { | 4508 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
4246 | case e1000_82571: | 4509 | * would have already happened in close and is redundant. */ |
4247 | case e1000_82572: | 4510 | e1000_release_hw_control(adapter); |
4248 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | ||
4249 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | ||
4250 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); | ||
4251 | break; | ||
4252 | case e1000_82573: | ||
4253 | swsm = E1000_READ_REG(&adapter->hw, SWSM); | ||
4254 | E1000_WRITE_REG(&adapter->hw, SWSM, | ||
4255 | swsm & ~E1000_SWSM_DRV_LOAD); | ||
4256 | break; | ||
4257 | default: | ||
4258 | break; | ||
4259 | } | ||
4260 | 4511 | ||
4261 | pci_disable_device(pdev); | 4512 | pci_disable_device(pdev); |
4262 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 4513 | |
4514 | retval = pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
4515 | if (retval) | ||
4516 | DPRINTK(PROBE, ERR, "Error in setting power state\n"); | ||
4263 | 4517 | ||
4264 | return 0; | 4518 | return 0; |
4265 | } | 4519 | } |
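Each pci_enable_wake() call in the suspend path now uses the symbolic PCI_D3hot/PCI_D3cold states and checks the return value, since the platform may refuse wake from a given state. A sketch of the checked pattern (enable_wake() is a stand-in that pretends D3cold wake is unsupported; -22 plays the role of -EINVAL):

    #include <stdio.h>

    enum pci_power { PCI_D0 = 0, PCI_D3hot = 3, PCI_D3cold = 4 };

    /* stand-in for pci_enable_wake() */
    static int enable_wake(enum pci_power state, int enable)
    {
        return (enable && state == PCI_D3cold) ? -22 : 0;
    }

    int main(void)
    {
        int retval;

        retval = enable_wake(PCI_D3hot, 1);
        if (retval)
            fprintf(stderr, "Error enabling D3 wake\n");
        retval = enable_wake(PCI_D3cold, 1);
        if (retval)
            fprintf(stderr, "Error enabling D3 cold wake\n");  /* fires */
        return 0;
    }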
@@ -4269,16 +4523,21 @@ e1000_resume(struct pci_dev *pdev) | |||
4269 | { | 4523 | { |
4270 | struct net_device *netdev = pci_get_drvdata(pdev); | 4524 | struct net_device *netdev = pci_get_drvdata(pdev); |
4271 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4525 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4272 | uint32_t manc, ret_val, swsm; | 4526 | int retval; |
4273 | uint32_t ctrl_ext; | 4527 | uint32_t manc, ret_val; |
4274 | 4528 | ||
4275 | pci_set_power_state(pdev, PCI_D0); | 4529 | retval = pci_set_power_state(pdev, PCI_D0); |
4276 | pci_restore_state(pdev); | 4530 | if (retval) |
4531 | DPRINTK(PROBE, ERR, "Error in setting power state\n"); | ||
4277 | ret_val = pci_enable_device(pdev); | 4532 | ret_val = pci_enable_device(pdev); |
4278 | pci_set_master(pdev); | 4533 | pci_set_master(pdev); |
4279 | 4534 | ||
4280 | pci_enable_wake(pdev, PCI_D3hot, 0); | 4535 | retval = pci_enable_wake(pdev, PCI_D3hot, 0); |
4281 | pci_enable_wake(pdev, PCI_D3cold, 0); | 4536 | if (retval) |
4537 | DPRINTK(PROBE, ERR, "Error disabling D3 wake\n"); | ||
4538 | retval = pci_enable_wake(pdev, PCI_D3cold, 0); | ||
4539 | if (retval) | ||
4540 | DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n"); | ||
4282 | 4541 | ||
4283 | e1000_reset(adapter); | 4542 | e1000_reset(adapter); |
4284 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); | 4543 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); |
@@ -4295,21 +4554,13 @@ e1000_resume(struct pci_dev *pdev) | |||
4295 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 4554 | E1000_WRITE_REG(&adapter->hw, MANC, manc); |
4296 | } | 4555 | } |
4297 | 4556 | ||
4298 | switch(adapter->hw.mac_type) { | 4557 | /* If the controller is 82573 and f/w is AMT, do not set |
4299 | case e1000_82571: | 4558 | * DRV_LOAD until the interface is up. For all other cases, |
4300 | case e1000_82572: | 4559 | * let the f/w know that the h/w is now under the control |
4301 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 4560 | * of the driver. */ |
4302 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | 4561 | if (adapter->hw.mac_type != e1000_82573 || |
4303 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); | 4562 | !e1000_check_mng_mode(&adapter->hw)) |
4304 | break; | 4563 | e1000_get_hw_control(adapter); |
4305 | case e1000_82573: | ||
4306 | swsm = E1000_READ_REG(&adapter->hw, SWSM); | ||
4307 | E1000_WRITE_REG(&adapter->hw, SWSM, | ||
4308 | swsm | E1000_SWSM_DRV_LOAD); | ||
4309 | break; | ||
4310 | default: | ||
4311 | break; | ||
4312 | } | ||
4313 | 4564 | ||
4314 | return 0; | 4565 | return 0; |
4315 | } | 4566 | } |
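Both power paths now funnel the DRV_LOAD handshake through e1000_release_hw_control()/e1000_get_hw_control() instead of open-coded switches; judging from the switch removed above, the "get" side presumably sets CTRL_EXT.DRV_LOAD on 82571/82572 and SWSM.DRV_LOAD on 82573. A sketch under that assumption (the register variables and bit positions are illustrative stand-ins, not the verified layout):

    #include <stdint.h>
    #include <stdio.h>

    enum mac_type { e1000_82571, e1000_82572, e1000_82573, e1000_other };

    /* bit values are illustrative stand-ins for the real register layout */
    #define CTRL_EXT_DRV_LOAD (1u << 28)
    #define SWSM_DRV_LOAD     (1u << 3)

    static uint32_t ctrl_ext, swsm;     /* stand-ins for the two registers */

    /* mirror image of release_hw_control: tell f/w the driver owns the h/w */
    static void get_hw_control(enum mac_type mac)
    {
        switch (mac) {
        case e1000_82571:
        case e1000_82572:
            ctrl_ext |= CTRL_EXT_DRV_LOAD;
            break;
        case e1000_82573:
            swsm |= SWSM_DRV_LOAD;
            break;
        default:
            break;
        }
    }

    int main(void)
    {
        get_hw_control(e1000_82573);
        printf("swsm=%#x ctrl_ext=%#x\n", swsm, ctrl_ext);
        return 0;
    }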
@@ -4327,6 +4578,9 @@ e1000_netpoll(struct net_device *netdev) | |||
4327 | disable_irq(adapter->pdev->irq); | 4578 | disable_irq(adapter->pdev->irq); |
4328 | e1000_intr(adapter->pdev->irq, netdev, NULL); | 4579 | e1000_intr(adapter->pdev->irq, netdev, NULL); |
4329 | e1000_clean_tx_irq(adapter, adapter->tx_ring); | 4580 | e1000_clean_tx_irq(adapter, adapter->tx_ring); |
4581 | #ifndef CONFIG_E1000_NAPI | ||
4582 | adapter->clean_rx(adapter, adapter->rx_ring); | ||
4583 | #endif | ||
4330 | enable_irq(adapter->pdev->irq); | 4584 | enable_irq(adapter->pdev->irq); |
4331 | } | 4585 | } |
4332 | #endif | 4586 | #endif |