author	Malli Chilakala <mallikarjuna.chilakala@intel.com>	2005-04-28 22:43:52 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-05-12 20:48:54 -0400
commit	2d7edb923a823660b081bd4c660300ee19adca8d (patch)
tree	3442b72425638e8530d31490339567d42706534a /drivers/net/e1000/e1000_main.c
parent	f0d11ed0b0650d2f93f56f65167c10a577c16c88 (diff)
[PATCH] e1000:82573 specific code & packet split code
82573 specific code & packet split code Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com> Signed-off-by: Ganesh Venkatesan <ganesh.venkatesan@intel.com> Signed-off-by: John Ronciak <john.ronciak@intel.com> diff -up net-drivers-2.6/drivers/net/e1000/e1000.h net-drivers-2.6/drivers/net/e1000.new/e1000.h
Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--	drivers/net/e1000/e1000_main.c	804
1 file changed, 691 insertions(+), 113 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4cdf371961eb..5e6e1f7fd777 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -155,10 +155,14 @@ static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
 static int e1000_clean(struct net_device *netdev, int *budget);
 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                     int *work_done, int work_to_do);
+static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+                                       int *work_done, int work_to_do);
 #else
 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
+static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter);
 #endif
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 			   int cmd);
@@ -286,7 +290,29 @@ e1000_irq_enable(struct e1000_adapter *adapter)
 		E1000_WRITE_FLUSH(&adapter->hw);
 	}
 }
+void
+e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	uint16_t vid = adapter->hw.mng_cookie.vlan_id;
+	uint16_t old_vid = adapter->mng_vlan_id;
+	if(adapter->vlgrp) {
+		if(!adapter->vlgrp->vlan_devices[vid]) {
+			if(adapter->hw.mng_cookie.status &
+				E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
+				e1000_vlan_rx_add_vid(netdev, vid);
+				adapter->mng_vlan_id = vid;
+			} else
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+
+			if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
+					(vid != old_vid) &&
+					!adapter->vlgrp->vlan_devices[old_vid])
+				e1000_vlan_rx_kill_vid(netdev, old_vid);
+		}
+	}
+}
 
 int
 e1000_up(struct e1000_adapter *adapter)
 {
@@ -310,7 +336,7 @@ e1000_up(struct e1000_adapter *adapter)
 	e1000_configure_tx(adapter);
 	e1000_setup_rctl(adapter);
 	e1000_configure_rx(adapter);
-	e1000_alloc_rx_buffers(adapter);
+	adapter->alloc_rx_buf(adapter);
 
 #ifdef CONFIG_PCI_MSI
 	if(adapter->hw.mac_type > e1000_82547_rev_2) {
@@ -366,8 +392,12 @@ e1000_down(struct e1000_adapter *adapter)
 	e1000_clean_rx_ring(adapter);
 
 	/* If WoL is not enabled
+	 * and management mode is not IAMT
 	 * Power down the PHY so no link is implied when interface is down */
-	if(!adapter->wol && adapter->hw.media_type == e1000_media_type_copper) {
+	if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+	   adapter->hw.media_type == e1000_media_type_copper &&
+	   !e1000_check_mng_mode(&adapter->hw) &&
+	   !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
 		uint16_t mii_reg;
 		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
 		mii_reg |= MII_CR_POWER_DOWN;
@@ -379,28 +409,34 @@ e1000_down(struct e1000_adapter *adapter)
 void
 e1000_reset(struct e1000_adapter *adapter)
 {
-	uint32_t pba;
+	uint32_t pba, manc;
 
 	/* Repartition Pba for greater than 9k mtu
 	 * To take effect CTRL.RST is required.
 	 */
 
-	if(adapter->hw.mac_type < e1000_82547) {
-		if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
-			pba = E1000_PBA_40K;
-		else
-			pba = E1000_PBA_48K;
-	} else {
-		if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
-			pba = E1000_PBA_22K;
-		else
-			pba = E1000_PBA_30K;
+	switch (adapter->hw.mac_type) {
+	case e1000_82547:
+		pba = E1000_PBA_30K;
+		break;
+	case e1000_82573:
+		pba = E1000_PBA_12K;
+		break;
+	default:
+		pba = E1000_PBA_48K;
+		break;
+	}
+
+
+
+	if(adapter->hw.mac_type == e1000_82547) {
 		adapter->tx_fifo_head = 0;
 		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
 		adapter->tx_fifo_size =
 			(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
 		atomic_set(&adapter->tx_fifo_stall, 0);
 	}
+
 	E1000_WRITE_REG(&adapter->hw, PBA, pba);
 
 	/* flow control settings */
@@ -412,17 +448,23 @@ e1000_reset(struct e1000_adapter *adapter)
 	adapter->hw.fc_send_xon = 1;
 	adapter->hw.fc = adapter->hw.original_fc;
 
+	/* Allow time for pending master requests to run */
 	e1000_reset_hw(&adapter->hw);
 	if(adapter->hw.mac_type >= e1000_82544)
 		E1000_WRITE_REG(&adapter->hw, WUC, 0);
 	if(e1000_init_hw(&adapter->hw))
 		DPRINTK(PROBE, ERR, "Hardware Error\n");
-
+	e1000_update_mng_vlan(adapter);
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
 
 	e1000_reset_adaptive(&adapter->hw);
 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+	if (adapter->en_mng_pt) {
+		manc = E1000_READ_REG(&adapter->hw, MANC);
+		manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
+		E1000_WRITE_REG(&adapter->hw, MANC, manc);
+	}
 }
 
 /**
@@ -443,15 +485,13 @@ e1000_probe(struct pci_dev *pdev,
 {
 	struct net_device *netdev;
 	struct e1000_adapter *adapter;
+	unsigned long mmio_start, mmio_len;
+	uint32_t swsm;
+
 	static int cards_found = 0;
-	unsigned long mmio_start;
-	int mmio_len;
-	int pci_using_dac;
-	int i;
-	int err;
+	int i, err, pci_using_dac;
 	uint16_t eeprom_data;
 	uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
-
 	if((err = pci_enable_device(pdev)))
 		return err;
 
@@ -538,6 +578,9 @@ e1000_probe(struct pci_dev *pdev,
 	if((err = e1000_sw_init(adapter)))
 		goto err_sw_init;
 
+	if((err = e1000_check_phy_reset_block(&adapter->hw)))
+		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
+
 	if(adapter->hw.mac_type >= e1000_82543) {
 		netdev->features = NETIF_F_SG |
 				   NETIF_F_HW_CSUM |
@@ -550,6 +593,11 @@ e1000_probe(struct pci_dev *pdev,
 	if((adapter->hw.mac_type >= e1000_82544) &&
 	   (adapter->hw.mac_type != e1000_82547))
 		netdev->features |= NETIF_F_TSO;
+
+#ifdef NETIF_F_TSO_IPV6
+	if(adapter->hw.mac_type > e1000_82547_rev_2)
+		netdev->features |= NETIF_F_TSO_IPV6;
+#endif
 #endif
 	if(pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
@@ -557,6 +605,8 @@ e1000_probe(struct pci_dev *pdev,
 	/* hard_start_xmit is safe against parallel locking */
 	netdev->features |= NETIF_F_LLTX;
 
+	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
+
 	/* before reading the EEPROM, reset the controller to
 	 * put the device in a known good starting state */
 
@@ -646,6 +696,17 @@ e1000_probe(struct pci_dev *pdev,
 	/* reset the hardware with the new settings */
 	e1000_reset(adapter);
 
+	/* Let firmware know the driver has taken over */
+	switch(adapter->hw.mac_type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+
 	strcpy(netdev->name, "eth%d");
 	if((err = register_netdev(netdev)))
 		goto err_register;
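The SWSM.DRV_LOAD handshake above is how the driver tells 82573 manageability firmware that software now owns the device; e1000_remove() and e1000_suspend() clear the same bit, and e1000_resume() sets it again. A minimal sketch of the read-modify-write pattern with a hypothetical helper name (the patch itself open-codes it at each call site):

    /* Hedged sketch: e1000_set_drv_load() is a hypothetical helper;
     * the patch repeats this read-modify-write at four call sites. */
    static void e1000_set_drv_load(struct e1000_hw *hw, int loaded)
    {
    	uint32_t swsm;

    	if (hw->mac_type != e1000_82573)
    		return;		/* only firmware-managed MACs care */

    	swsm = E1000_READ_REG(hw, SWSM);
    	if (loaded)
    		swsm |= E1000_SWSM_DRV_LOAD;	/* probe / resume   */
    	else
    		swsm &= ~E1000_SWSM_DRV_LOAD;	/* remove / suspend */
    	E1000_WRITE_REG(hw, SWSM, swsm);
    }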
@@ -681,7 +742,7 @@ e1000_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev->priv;
-	uint32_t manc;
+	uint32_t manc, swsm;
 
 	flush_scheduled_work();
 
@@ -694,9 +755,21 @@ e1000_remove(struct pci_dev *pdev)
 	}
 	}
 
+	switch(adapter->hw.mac_type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+		break;
+
+	default:
+		break;
+	}
+
 	unregister_netdev(netdev);
 
-	e1000_phy_hw_reset(&adapter->hw);
+	if(!e1000_check_phy_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
 
 	iounmap(adapter->hw.hw_addr);
 	pci_release_regions(pdev);
@@ -734,6 +807,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
 	adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+	adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
 	hw->max_frame_size = netdev->mtu +
 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
@@ -747,7 +821,10 @@ e1000_sw_init(struct e1000_adapter *adapter)
 
 	/* initialize eeprom parameters */
 
-	e1000_init_eeprom_params(hw);
+	if(e1000_init_eeprom_params(hw)) {
+		E1000_ERR("EEPROM initialization failed\n");
+		return -EIO;
+	}
 
 	switch(hw->mac_type) {
 	default:
@@ -812,6 +889,11 @@ e1000_open(struct net_device *netdev)
 
 	if((err = e1000_up(adapter)))
 		goto err_up;
+	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+	if((adapter->hw.mng_cookie.status &
+		E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
+		e1000_update_mng_vlan(adapter);
+	}
 
 	return E1000_SUCCESS;
 
@@ -847,14 +929,18 @@ e1000_close(struct net_device *netdev)
 	e1000_free_tx_resources(adapter);
 	e1000_free_rx_resources(adapter);
 
+	if((adapter->hw.mng_cookie.status &
+		E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
+		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+	}
 	return 0;
 }
 
 /**
  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
  * @adapter: address of board private structure
- * @begin: address of beginning of memory
- * @end: address of end of memory
+ * @start: address of beginning of memory
+ * @len: length of memory
  **/
 static inline boolean_t
 e1000_check_64k_bound(struct e1000_adapter *adapter,
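The kernel-doc fix above brings the comment in line with a (start, len) calling convention. The body of the check sits outside this hunk; offered purely as a hedged sketch, one common way to write such a test is an XOR of the first and last byte addresses, which differs above bit 15 exactly when the region straddles a 64 KiB boundary:

    /* Sketch only -- not the function body from this patch. */
    static inline boolean_t addr_range_within_64k(void *start, unsigned long len)
    {
    	unsigned long begin = (unsigned long)start;
    	unsigned long end = begin + len - 1;

    	return ((begin ^ end) >> 16) == 0 ? TRUE : FALSE;
    }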
@@ -1039,7 +1125,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
 {
 	struct e1000_desc_ring *rxdr = &adapter->rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
-	int size;
+	int size, desc_len;
 
 	size = sizeof(struct e1000_buffer) * rxdr->count;
 	rxdr->buffer_info = vmalloc(size);
@@ -1050,9 +1136,35 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
 	}
 	memset(rxdr->buffer_info, 0, size);
 
+	size = sizeof(struct e1000_ps_page) * rxdr->count;
+	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
+	if(!rxdr->ps_page) {
+		vfree(rxdr->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rxdr->ps_page, 0, size);
+
+	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
+	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
+	if(!rxdr->ps_page_dma) {
+		vfree(rxdr->buffer_info);
+		kfree(rxdr->ps_page);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rxdr->ps_page_dma, 0, size);
+
+	if(adapter->hw.mac_type <= e1000_82547_rev_2)
+		desc_len = sizeof(struct e1000_rx_desc);
+	else
+		desc_len = sizeof(union e1000_rx_desc_packet_split);
+
 	/* Round up to nearest 4K */
 
-	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
+	rxdr->size = rxdr->count * desc_len;
 	E1000_ROUNDUP(rxdr->size, 4096);
 
 	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
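A worked example of the descriptor-ring sizing this hunk introduces, under the assumption of 16-byte legacy descriptors, 32-byte packet-split descriptors, and an arbitrary ring count of 80:

    #include <stdio.h>

    /* The descriptor sizes match the e1000 structures; the ring count
     * of 80 is only an assumed value for illustration. */
    static unsigned long roundup_4k(unsigned long size)
    {
    	return (size + 4095) & ~4095UL;	/* what E1000_ROUNDUP(size, 4096) does */
    }

    int main(void)
    {
    	unsigned long count = 80;

    	printf("legacy: %lu -> %lu\n", count * 16, roundup_4k(count * 16));
    	/* legacy: 1280 -> 4096 (one page)  */
    	printf("split:  %lu -> %lu\n", count * 32, roundup_4k(count * 32));
    	/* split:  2560 -> 4096 (still one) */
    	return 0;
    }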
@@ -1062,6 +1174,8 @@ setup_rx_desc_die:
 		DPRINTK(PROBE, ERR,
 		"Unble to Allocate Memory for the Recieve descriptor ring\n");
 		vfree(rxdr->buffer_info);
+		kfree(rxdr->ps_page);
+		kfree(rxdr->ps_page_dma);
 		return -ENOMEM;
 	}
 
@@ -1089,6 +1203,8 @@ setup_rx_desc_die:
1089 "Unable to Allocate aligned Memory for the" 1203 "Unable to Allocate aligned Memory for the"
1090 " Receive descriptor ring\n"); 1204 " Receive descriptor ring\n");
1091 vfree(rxdr->buffer_info); 1205 vfree(rxdr->buffer_info);
1206 kfree(rxdr->ps_page);
1207 kfree(rxdr->ps_page_dma);
1092 return -ENOMEM; 1208 return -ENOMEM;
1093 } else { 1209 } else {
1094 /* free old, move on with the new one since its okay */ 1210 /* free old, move on with the new one since its okay */
@@ -1111,7 +1227,8 @@ setup_rx_desc_die:
 static void
 e1000_setup_rctl(struct e1000_adapter *adapter)
 {
-	uint32_t rctl;
+	uint32_t rctl, rfctl;
+	uint32_t psrctl = 0;
 
 	rctl = E1000_READ_REG(&adapter->hw, RCTL);
 
@@ -1126,24 +1243,69 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 	else
 		rctl &= ~E1000_RCTL_SBP;
 
+	if (adapter->netdev->mtu <= ETH_DATA_LEN)
+		rctl &= ~E1000_RCTL_LPE;
+	else
+		rctl |= E1000_RCTL_LPE;
+
 	/* Setup buffer sizes */
-	rctl &= ~(E1000_RCTL_SZ_4096);
-	rctl |= (E1000_RCTL_BSEX | E1000_RCTL_LPE);
-	switch (adapter->rx_buffer_len) {
-	case E1000_RXBUFFER_2048:
-	default:
-		rctl |= E1000_RCTL_SZ_2048;
-		rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
-		break;
-	case E1000_RXBUFFER_4096:
-		rctl |= E1000_RCTL_SZ_4096;
-		break;
-	case E1000_RXBUFFER_8192:
-		rctl |= E1000_RCTL_SZ_8192;
-		break;
-	case E1000_RXBUFFER_16384:
-		rctl |= E1000_RCTL_SZ_16384;
-		break;
+	if(adapter->hw.mac_type == e1000_82573) {
+		/* We can now specify buffers in 1K increments.
+		 * BSIZE and BSEX are ignored in this case. */
+		rctl |= adapter->rx_buffer_len << 0x11;
+	} else {
+		rctl &= ~E1000_RCTL_SZ_4096;
+		rctl |= E1000_RCTL_BSEX;
+		switch (adapter->rx_buffer_len) {
+		case E1000_RXBUFFER_2048:
+		default:
+			rctl |= E1000_RCTL_SZ_2048;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_4096:
+			rctl |= E1000_RCTL_SZ_4096;
+			break;
+		case E1000_RXBUFFER_8192:
+			rctl |= E1000_RCTL_SZ_8192;
+			break;
+		case E1000_RXBUFFER_16384:
+			rctl |= E1000_RCTL_SZ_16384;
+			break;
+		}
+	}
+
+#ifdef CONFIG_E1000_PACKET_SPLIT
+	/* 82571 and greater support packet-split where the protocol
+	 * header is placed in skb->data and the packet data is
+	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
+	 * In the case of a non-split, skb->data is linearly filled,
+	 * followed by the page buffers.  Therefore, skb->data is
+	 * sized to hold the largest protocol header.
+	 */
+	adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2)
+			  && (adapter->netdev->mtu
+			      < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0));
+#endif
+	if(adapter->rx_ps) {
+		/* Configure extra packet-split registers */
+		rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
+		rfctl |= E1000_RFCTL_EXTEN;
+		/* disable IPv6 packet split support */
+		rfctl |= E1000_RFCTL_IPV6_DIS;
+		E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
+
+		rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
+
+		psrctl |= adapter->rx_ps_bsize0 >>
+			E1000_PSRCTL_BSIZE0_SHIFT;
+		psrctl |= PAGE_SIZE >>
+			E1000_PSRCTL_BSIZE1_SHIFT;
+		psrctl |= PAGE_SIZE <<
+			E1000_PSRCTL_BSIZE2_SHIFT;
+		psrctl |= PAGE_SIZE <<
+			E1000_PSRCTL_BSIZE3_SHIFT;
+
+		E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
 	}
 
 	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
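The PSRCTL write above packs four buffer sizes into one register. A worked example of the arithmetic, assuming the BSIZE shift constants from e1000_hw.h of this era (7, 2, 6 and 14), rx_ps_bsize0 = 256 and a 4 KiB PAGE_SIZE (BSIZE0 is expressed in 128-byte units, BSIZE1..3 in 1 KiB units):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t psrctl = 0;

    	psrctl |= 256  >> 7;	/* BSIZE0: 256 B -> field value 2           */
    	psrctl |= 4096 >> 2;	/* BSIZE1: 4 KiB -> 4 at bit 8  (0x0400)    */
    	psrctl |= 4096 << 6;	/* BSIZE2: 4 KiB -> 4 at bit 16 (0x40000)   */
    	psrctl |= 4096 << 14;	/* BSIZE3: 4 KiB -> 4 at bit 24 (0x4000000) */

    	printf("PSRCTL = 0x%08x\n", psrctl);	/* 0x04040402 */
    	return 0;
    }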
@@ -1160,9 +1322,18 @@ static void
 e1000_configure_rx(struct e1000_adapter *adapter)
 {
 	uint64_t rdba = adapter->rx_ring.dma;
-	uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
-	uint32_t rctl;
-	uint32_t rxcsum;
+	uint32_t rdlen, rctl, rxcsum;
+
+	if(adapter->rx_ps) {
+		rdlen = adapter->rx_ring.count *
+			sizeof(union e1000_rx_desc_packet_split);
+		adapter->clean_rx = e1000_clean_rx_irq_ps;
+		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
+	} else {
+		rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
+		adapter->clean_rx = e1000_clean_rx_irq;
+		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+	}
 
 	/* disable receives while setting up the descriptors */
 	rctl = E1000_READ_REG(&adapter->hw, RCTL);
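Choosing clean_rx and alloc_rx_buf once at configure time keeps the hot paths free of per-packet rx_ps tests; the interrupt loop later in this patch simply dispatches through the stored pointer, as in this excerpt from the e1000_intr() hunk below:

    /* Excerpt (see the e1000_intr() hunk further down): the receive
     * handler is no longer a hard-coded call but an indirect one. */
    for(i = 0; i < E1000_MAX_INTR; i++)
    	if(unlikely(!adapter->clean_rx(adapter) &
    	   !e1000_clean_tx_irq(adapter)))
    		break;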
@@ -1189,13 +1360,27 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 	E1000_WRITE_REG(&adapter->hw, RDT, 0);
 
 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
-	if((adapter->hw.mac_type >= e1000_82543) &&
-	   (adapter->rx_csum == TRUE)) {
+	if(adapter->hw.mac_type >= e1000_82543) {
 		rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
-		rxcsum |= E1000_RXCSUM_TUOFL;
+		if(adapter->rx_csum == TRUE) {
+			rxcsum |= E1000_RXCSUM_TUOFL;
+
+			/* Enable 82573 IPv4 payload checksum for UDP fragments
+			 * Must be used in conjunction with packet-split. */
+			if((adapter->hw.mac_type > e1000_82547_rev_2) &&
+			   (adapter->rx_ps)) {
+				rxcsum |= E1000_RXCSUM_IPPCSE;
+			}
+		} else {
+			rxcsum &= ~E1000_RXCSUM_TUOFL;
+			/* don't need to clear IPPCSE as it defaults to 0 */
+		}
 		E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
 	}
 
+	if (adapter->hw.mac_type == e1000_82573)
+		E1000_WRITE_REG(&adapter->hw, ERT, 0x0100);
+
 	/* Enable Receives */
 	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
 }
@@ -1298,6 +1483,10 @@ e1000_free_rx_resources(struct e1000_adapter *adapter)
 
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
+	kfree(rx_ring->ps_page);
+	rx_ring->ps_page = NULL;
+	kfree(rx_ring->ps_page_dma);
+	rx_ring->ps_page_dma = NULL;
 
 	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
 
@@ -1314,16 +1503,19 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
 {
 	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
 	struct e1000_buffer *buffer_info;
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned long size;
-	unsigned int i;
+	unsigned int i, j;
 
 	/* Free all the Rx ring sk_buffs */
 
 	for(i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
 		if(buffer_info->skb) {
-
+			ps_page = &rx_ring->ps_page[i];
+			ps_page_dma = &rx_ring->ps_page_dma[i];
 			pci_unmap_single(pdev,
 					 buffer_info->dma,
 					 buffer_info->length,
@@ -1331,11 +1523,25 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
 
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
+
+			for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+				if(!ps_page->ps_page[j]) break;
+				pci_unmap_single(pdev,
+						 ps_page_dma->ps_page_dma[j],
+						 PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				ps_page_dma->ps_page_dma[j] = 0;
+				put_page(ps_page->ps_page[j]);
+				ps_page->ps_page[j] = NULL;
+			}
 		}
 	}
 
 	size = sizeof(struct e1000_buffer) * rx_ring->count;
 	memset(rx_ring->buffer_info, 0, size);
+	size = sizeof(struct e1000_ps_page) * rx_ring->count;
+	memset(rx_ring->ps_page, 0, size);
+	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
+	memset(rx_ring->ps_page_dma, 0, size);
 
 	/* Zero out the descriptor ring */
 
@@ -1573,6 +1779,11 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 	uint32_t link;
 
 	e1000_check_for_link(&adapter->hw);
+	if (adapter->hw.mac_type == e1000_82573) {
+		e1000_enable_tx_pkt_filtering(&adapter->hw);
+		if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
+			e1000_update_mng_vlan(adapter);
+	}
 
 	if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
 	   !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
@@ -1659,6 +1870,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 #define E1000_TX_FLAGS_CSUM		0x00000001
 #define E1000_TX_FLAGS_VLAN		0x00000002
 #define E1000_TX_FLAGS_TSO		0x00000004
+#define E1000_TX_FLAGS_IPV4		0x00000008
 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
 #define E1000_TX_FLAGS_VLAN_SHIFT	16
 
@@ -1669,7 +1881,7 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
 	struct e1000_context_desc *context_desc;
 	unsigned int i;
 	uint32_t cmd_length = 0;
-	uint16_t ipcse, tucse, mss;
+	uint16_t ipcse = 0, tucse, mss;
 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
 	int err;
 
@@ -1682,23 +1894,37 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
 
 	hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
 	mss = skb_shinfo(skb)->tso_size;
-	skb->nh.iph->tot_len = 0;
-	skb->nh.iph->check = 0;
-	skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
-					      skb->nh.iph->daddr,
-					      0,
-					      IPPROTO_TCP,
-					      0);
+	if(skb->protocol == ntohs(ETH_P_IP)) {
+		skb->nh.iph->tot_len = 0;
+		skb->nh.iph->check = 0;
+		skb->h.th->check =
+			~csum_tcpudp_magic(skb->nh.iph->saddr,
+					   skb->nh.iph->daddr,
+					   0,
+					   IPPROTO_TCP,
+					   0);
+		cmd_length = E1000_TXD_CMD_IP;
+		ipcse = skb->h.raw - skb->data - 1;
+#ifdef NETIF_F_TSO_IPV6
+	} else if(skb->protocol == ntohs(ETH_P_IPV6)) {
+		skb->nh.ipv6h->payload_len = 0;
+		skb->h.th->check =
+			~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
+					 &skb->nh.ipv6h->daddr,
+					 0,
+					 IPPROTO_TCP,
+					 0);
+		ipcse = 0;
+#endif
+	}
 	ipcss = skb->nh.raw - skb->data;
 	ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
-	ipcse = skb->h.raw - skb->data - 1;
 	tucss = skb->h.raw - skb->data;
 	tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
 	tucse = 0;
 
 	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
-		       E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
-		       (skb->len - (hdr_len)));
+		       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
 
 	i = adapter->tx_ring.next_to_use;
 	context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
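The TSO path above seeds th->check with the un-complemented pseudo-header sum, computed with a zero length field, so the controller can fold in each segment's real length and payload as it splits the frame. A standalone sketch of that pseudo-header fold (the helper name and addresses are ours, not the kernel's; the kernel does this via ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0)):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr, uint8_t proto)
    {
    	uint64_t sum = 0;

    	sum += (saddr >> 16) + (saddr & 0xffff);
    	sum += (daddr >> 16) + (daddr & 0xffff);
    	sum += proto;		/* TCP length intentionally omitted (0) */

    	while (sum >> 16)	/* fold carries back into 16 bits */
    		sum = (sum & 0xffff) + (sum >> 16);
    	return (uint16_t)sum;	/* this partial sum is what lands in th->check */
    }

    int main(void)
    {
    	printf("seed = 0x%04x\n", pseudo_hdr_sum(0xc0a80001, 0xc0a80002, 6));
    	return 0;
    }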
@@ -1866,7 +2092,10 @@ e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
 	if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
 			     E1000_TXD_CMD_TSE;
-		txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+		if(likely(tx_flags & E1000_TX_FLAGS_IPV4))
+			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
 	}
 
 	if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
@@ -1941,6 +2170,53 @@ no_fifo_stall_required:
 	return 0;
 }
 
+#define MINIMUM_DHCP_PACKET_SIZE 282
+static inline int
+e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
+{
+	struct e1000_hw *hw =  &adapter->hw;
+	uint16_t length, offset;
+	if(vlan_tx_tag_present(skb)) {
+		if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+			( adapter->hw.mng_cookie.status &
+			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
+			return 0;
+	}
+	if(htons(ETH_P_IP) == skb->protocol) {
+		const struct iphdr *ip = skb->nh.iph;
+		if(IPPROTO_UDP == ip->protocol) {
+			struct udphdr *udp = (struct udphdr *)(skb->h.uh);
+			if(ntohs(udp->dest) == 67) {
+				offset = (uint8_t *)udp + 8 - skb->data;
+				length = skb->len - offset;
+
+				return e1000_mng_write_dhcp_info(hw,
+						(uint8_t *)udp + 8, length);
+			}
+		}
+	} else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
+		struct ethhdr *eth = (struct ethhdr *) skb->data;
+		if((htons(ETH_P_IP) == eth->h_proto)) {
+			const struct iphdr *ip =
+				(struct iphdr *)((uint8_t *)skb->data+14);
+			if(IPPROTO_UDP == ip->protocol) {
+				struct udphdr *udp =
+					(struct udphdr *)((uint8_t *)ip +
+						(ip->ihl << 2));
+				if(ntohs(udp->dest) == 67) {
+					offset = (uint8_t *)udp + 8 - skb->data;
+					length = skb->len - offset;
+
+					return e1000_mng_write_dhcp_info(hw,
+							(uint8_t *)udp + 8,
+							length);
+				}
+			}
+		}
+	}
+	return 0;
+}
+
 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
 static int
 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
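In e1000_transfer_dhcp_info() above, (uint8_t *)udp + 8 skips the 8-byte UDP header, so offset and length delimit exactly the BOOTP/DHCP payload handed to e1000_mng_write_dhcp_info(). A worked example of the offset math for an untagged frame with a minimal IP header:

    #include <stdio.h>

    int main(void)
    {
    	int eth_hlen = 14;        /* struct ethhdr (the "+14" in the hunk)   */
    	int ip_hlen  = 5 << 2;    /* ip->ihl << 2 -> 20 bytes, minimal header */
    	int udp_hlen = 8;         /* the "+ 8" past the UDP header            */
    	int offset   = eth_hlen + ip_hlen + udp_hlen;

    	printf("DHCP payload offset = %d\n", offset);	/* 42 */
    	/* length = skb->len - offset: the BOOTP 'op' byte onward */
    	return 0;
    }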
@@ -2008,6 +2284,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		local_irq_restore(flags);
 		return NETDEV_TX_LOCKED;
 	}
+	if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
+		e1000_transfer_dhcp_info(adapter, skb);
+
 
 	/* need: count + 2 desc gap to keep tail from touching
 	 * head, otherwise try next time */
@@ -2044,6 +2323,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	else if(likely(e1000_tx_csum(adapter, skb)))
 		tx_flags |= E1000_TX_FLAGS_CSUM;
 
+	/* Old method was to assume IPv4 packet by default if TSO was enabled.
+	 * 82573 hardware supports TSO capabilities for IPv6 as well...
+	 * no longer assume, we must. */
+	if(likely(skb->protocol == ntohs(ETH_P_IP)))
+		tx_flags |= E1000_TX_FLAGS_IPV4;
+
 	e1000_tx_queue(adapter,
 		e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
 		tx_flags);
@@ -2110,7 +2395,6 @@ static int
 e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct e1000_adapter *adapter = netdev->priv;
-	int old_mtu = adapter->rx_buffer_len;
 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 
 	if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
@@ -2119,29 +2403,45 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 		return -EINVAL;
 	}
 
-	if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
-		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
-
-	} else if(adapter->hw.mac_type < e1000_82543) {
-		DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n");
+#define MAX_STD_JUMBO_FRAME_SIZE 9216
+	/* might want this to be bigger enum check... */
+	if (adapter->hw.mac_type == e1000_82573 &&
+	    max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+		DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
+				    "on 82573\n");
 		return -EINVAL;
+	}
 
-	} else if(max_frame <= E1000_RXBUFFER_4096) {
-		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
-
-	} else if(max_frame <= E1000_RXBUFFER_8192) {
-		adapter->rx_buffer_len = E1000_RXBUFFER_8192;
-
+	if(adapter->hw.mac_type > e1000_82547_rev_2) {
+		adapter->rx_buffer_len = max_frame;
+		E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
 	} else {
-		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+		if(unlikely((adapter->hw.mac_type < e1000_82543) &&
+		   (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
+			DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
+					    "on 82542\n");
+			return -EINVAL;
+
+		} else {
+			if(max_frame <= E1000_RXBUFFER_2048) {
+				adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+			} else if(max_frame <= E1000_RXBUFFER_4096) {
+				adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+			} else if(max_frame <= E1000_RXBUFFER_8192) {
+				adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+			} else if(max_frame <= E1000_RXBUFFER_16384) {
+				adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+			}
+		}
 	}
 
-	if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
+	netdev->mtu = new_mtu;
+
+	if(netif_running(netdev)) {
 		e1000_down(adapter);
 		e1000_up(adapter);
 	}
 
-	netdev->mtu = new_mtu;
 	adapter->hw.max_frame_size = max_frame;
 
 	return 0;
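A worked example of the two MTU paths above, for new_mtu = 3000 (so max_frame = 3000 + 14 + 4 = 3018):

    #include <stdio.h>

    static unsigned long roundup_1k(unsigned long v)
    {
    	return (v + 1023) & ~1023UL;	/* what E1000_ROUNDUP(v, 1024) does */
    }

    int main(void)
    {
    	unsigned long max_frame = 3000 + 14 + 4;

    	/* newer MACs (> 82547_rev_2): any 1 KiB granularity */
    	printf("newer MACs: %lu\n", roundup_1k(max_frame));	/* 3072 */
    	/* older MACs fall into the next fixed bucket instead */
    	printf("older MACs: %u\n", 4096);	/* E1000_RXBUFFER_4096 */
    	return 0;
    }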
@@ -2232,6 +2532,17 @@ e1000_update_stats(struct e1000_adapter *adapter)
 		adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
 		adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
 	}
+	if(hw->mac_type > e1000_82547_rev_2) {
+		adapter->stats.iac += E1000_READ_REG(hw, IAC);
+		adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
+		adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
+		adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
+		adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
+		adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
+		adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
+		adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
+		adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+	}
 
 	/* Fill out the OS statistics structure */
 
@@ -2337,7 +2648,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 	}
 
 	for(i = 0; i < E1000_MAX_INTR; i++)
-		if(unlikely(!e1000_clean_rx_irq(adapter) &
+		if(unlikely(!adapter->clean_rx(adapter) &
 		   !e1000_clean_tx_irq(adapter)))
 			break;
 
@@ -2363,7 +2674,7 @@ e1000_clean(struct net_device *netdev, int *budget)
 	int work_done = 0;
 
 	tx_cleaned = e1000_clean_tx_irq(adapter);
-	e1000_clean_rx_irq(adapter, &work_done, work_to_do);
+	adapter->clean_rx(adapter, &work_done, work_to_do);
 
 	*budget -= work_done;
 	netdev->quota -= work_done;
@@ -2501,41 +2812,57 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 
 /**
  * e1000_rx_checksum - Receive Checksum Offload for 82543
  * @adapter: board private structure
- * @rx_desc: receive descriptor
- * @sk_buff: socket buffer with received data
+ * @status_err: receive descriptor status and error fields
+ * @csum: receive descriptor csum field
+ * @sk_buff: socket buffer with received data
  **/
 
 static inline void
 e1000_rx_checksum(struct e1000_adapter *adapter,
-                  struct e1000_rx_desc *rx_desc,
+                  uint32_t status_err, uint32_t csum,
                   struct sk_buff *skb)
 {
+	uint16_t status = (uint16_t)status_err;
+	uint8_t errors = (uint8_t)(status_err >> 24);
+	skb->ip_summed = CHECKSUM_NONE;
+
 	/* 82543 or newer only */
-	if(unlikely((adapter->hw.mac_type < e1000_82543) ||
+	if(unlikely(adapter->hw.mac_type < e1000_82543)) return;
 	/* Ignore Checksum bit is set */
-	   (rx_desc->status & E1000_RXD_STAT_IXSM) ||
-	/* TCP Checksum has not been calculated */
-	   (!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) {
-		skb->ip_summed = CHECKSUM_NONE;
-		return;
-	}
-
-	/* At this point we know the hardware did the TCP checksum */
-	/* now look at the TCP checksum error bit */
-	if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
+	if(unlikely(status & E1000_RXD_STAT_IXSM)) return;
+	/* TCP/UDP checksum error bit is set */
+	if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
 		/* let the stack verify checksum errors */
-		skb->ip_summed = CHECKSUM_NONE;
 		adapter->hw_csum_err++;
+		return;
+	}
+	/* TCP/UDP Checksum has not been calculated */
+	if(adapter->hw.mac_type <= e1000_82547_rev_2) {
+		if(!(status & E1000_RXD_STAT_TCPCS))
+			return;
 	} else {
+		if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+			return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (likely(status & E1000_RXD_STAT_TCPCS)) {
 		/* TCP checksum is good */
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		adapter->hw_csum_good++;
+	} else if (adapter->hw.mac_type > e1000_82547_rev_2) {
+		/* IP fragment with UDP payload */
+		/* Hardware complements the payload checksum, so we undo it
+		 * and then put the value in host order for further stack use.
+		 */
+		csum = ntohl(csum ^ 0xFFFF);
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_HW;
 	}
+	adapter->hw_csum_good++;
 }
 
 /**
- * e1000_clean_rx_irq - Send received data up the network stack
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
  * @adapter: board private structure
  **/
 
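The reworked e1000_rx_checksum() takes one 32-bit status_err so legacy and packet-split callers can share it: legacy callers pack the descriptor's 8-bit status and errors fields, which the function then unpacks. A worked example with arbitrary bit values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint8_t  desc_status = 0x07;	/* arbitrary legacy-descriptor bits */
    	uint8_t  desc_errors = 0x20;	/* arbitrary error bits             */
    	uint32_t status_err  = (uint32_t)desc_status |
    			       ((uint32_t)desc_errors << 24);

    	uint16_t status = (uint16_t)status_err;		/* 0x0007 */
    	uint8_t  errors = (uint8_t)(status_err >> 24);	/* 0x20   */

    	printf("packed=0x%08x status=0x%04x errors=0x%02x\n",
    	       status_err, status, errors);
    	return 0;
    }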
@@ -2608,15 +2935,17 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
 		skb_put(skb, length - ETHERNET_FCS_SIZE);
 
 		/* Receive Checksum Offload */
-		e1000_rx_checksum(adapter, rx_desc, skb);
-
+		e1000_rx_checksum(adapter,
+				  (uint32_t)(rx_desc->status) |
+				  ((uint32_t)(rx_desc->errors) << 24),
+				  rx_desc->csum, skb);
 		skb->protocol = eth_type_trans(skb, netdev);
 #ifdef CONFIG_E1000_NAPI
 		if(unlikely(adapter->vlgrp &&
 			    (rx_desc->status & E1000_RXD_STAT_VP))) {
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 						 le16_to_cpu(rx_desc->special) &
 						 E1000_RXD_SPC_VLAN_MASK);
 		} else {
 			netif_receive_skb(skb);
 		}
@@ -2639,16 +2968,142 @@ next_desc:
 
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 	}
-
 	rx_ring->next_to_clean = i;
+	adapter->alloc_rx_buf(adapter);
+
+	return cleaned;
+}
+
+/**
+ * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
+ * @adapter: board private structure
+ **/
+
+static boolean_t
+#ifdef CONFIG_E1000_NAPI
+e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done,
+		      int work_to_do)
+#else
+e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
+#endif
+{
+	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+	union e1000_rx_desc_packet_split *rx_desc;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_buffer *buffer_info;
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+	struct sk_buff *skb;
+	unsigned int i, j;
+	uint32_t length, staterr;
+	boolean_t cleaned = FALSE;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+	staterr = rx_desc->wb.middle.status_error;
+
+	while(staterr & E1000_RXD_STAT_DD) {
+		buffer_info = &rx_ring->buffer_info[i];
+		ps_page = &rx_ring->ps_page[i];
+		ps_page_dma = &rx_ring->ps_page_dma[i];
+#ifdef CONFIG_E1000_NAPI
+		if(unlikely(*work_done >= work_to_do))
+			break;
+		(*work_done)++;
+#endif
+		cleaned = TRUE;
+		pci_unmap_single(pdev, buffer_info->dma,
+				 buffer_info->length,
+				 PCI_DMA_FROMDEVICE);
+
+		skb = buffer_info->skb;
+
+		if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
+			E1000_DBG("%s: Packet Split buffers didn't pick up"
+				  " the full packet\n", netdev->name);
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
 
-	e1000_alloc_rx_buffers(adapter);
+		if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		length = le16_to_cpu(rx_desc->wb.middle.length0);
+
+		if(unlikely(!length)) {
+			E1000_DBG("%s: Last part of the packet spanning"
+				  " multiple descriptors\n", netdev->name);
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		/* Good Receive */
+		skb_put(skb, length);
+
+		for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+			if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
+				break;
+
+			pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
+				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			ps_page_dma->ps_page_dma[j] = 0;
+			skb_shinfo(skb)->frags[j].page =
+				ps_page->ps_page[j];
+			ps_page->ps_page[j] = NULL;
+			skb_shinfo(skb)->frags[j].page_offset = 0;
+			skb_shinfo(skb)->frags[j].size = length;
+			skb_shinfo(skb)->nr_frags++;
+			skb->len += length;
+			skb->data_len += length;
+		}
+
+		e1000_rx_checksum(adapter, staterr,
+				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+		skb->protocol = eth_type_trans(skb, netdev);
+
+#ifdef HAVE_RX_ZERO_COPY
+		if(likely(rx_desc->wb.upper.header_status &
+			  E1000_RXDPS_HDRSTAT_HDRSP))
+			skb_shinfo(skb)->zero_copy = TRUE;
+#endif
+#ifdef CONFIG_E1000_NAPI
+		if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+				le16_to_cpu(rx_desc->wb.middle.vlan &
+					E1000_RXD_SPC_VLAN_MASK));
+		} else {
+			netif_receive_skb(skb);
+		}
+#else /* CONFIG_E1000_NAPI */
+		if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+			vlan_hwaccel_rx(skb, adapter->vlgrp,
+				le16_to_cpu(rx_desc->wb.middle.vlan &
+					E1000_RXD_SPC_VLAN_MASK));
+		} else {
+			netif_rx(skb);
+		}
+#endif /* CONFIG_E1000_NAPI */
+		netdev->last_rx = jiffies;
+
+next_desc:
+		rx_desc->wb.middle.status_error &= ~0xFF;
+		buffer_info->skb = NULL;
+		if(unlikely(++i == rx_ring->count)) i = 0;
+
+		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+		staterr = rx_desc->wb.middle.status_error;
+	}
+	rx_ring->next_to_clean = i;
+	adapter->alloc_rx_buf(adapter);
 
 	return cleaned;
 }
 
 /**
- * e1000_alloc_rx_buffers - Replace used receive buffers
+ * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
  * @adapter: address of board private structure
  **/
 
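A concrete instance of the length accounting in the frag loop above, assuming a 256-byte header buffer and two full 4 KiB page frags:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int len = 256, data_len = 0;	/* after skb_put(skb, 256) */
    	unsigned int frag_sizes[] = { 4096, 4096 };

    	for (int j = 0; j < 2; j++) {
    		len      += frag_sizes[j];	/* skb->len      += length */
    		data_len += frag_sizes[j];	/* skb->data_len += length */
    	}
    	printf("len=%u data_len=%u linear=%u\n", len, data_len,
    	       len - data_len);			/* 8448 8192 256 */
    	return 0;
    }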
@@ -2753,6 +3208,95 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
 }
 
 /**
+ * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+
+static void
+e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
+{
+	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	union e1000_rx_desc_packet_split *rx_desc;
+	struct e1000_buffer *buffer_info;
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+	struct sk_buff *skb;
+	unsigned int i, j;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+	ps_page = &rx_ring->ps_page[i];
+	ps_page_dma = &rx_ring->ps_page_dma[i];
+
+	while(!buffer_info->skb) {
+		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+
+		for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+			if(unlikely(!ps_page->ps_page[j])) {
+				ps_page->ps_page[j] =
+					alloc_page(GFP_ATOMIC);
+				if(unlikely(!ps_page->ps_page[j]))
+					goto no_buffers;
+				ps_page_dma->ps_page_dma[j] =
+					pci_map_page(pdev,
+						     ps_page->ps_page[j],
+						     0, PAGE_SIZE,
+						     PCI_DMA_FROMDEVICE);
+			}
+			/* Refresh the desc even if buffer_addrs didn't
+			 * change because each write-back erases this info.
+			 */
+			rx_desc->read.buffer_addr[j+1] =
+				cpu_to_le64(ps_page_dma->ps_page_dma[j]);
+		}
+
+		skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+
+		if(unlikely(!skb))
+			break;
+
+		/* Make buffer alignment 2 beyond a 16 byte boundary
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+
+		skb->dev = netdev;
+
+		buffer_info->skb = skb;
+		buffer_info->length = adapter->rx_ps_bsize0;
+		buffer_info->dma = pci_map_single(pdev, skb->data,
+						  adapter->rx_ps_bsize0,
+						  PCI_DMA_FROMDEVICE);
+
+		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+
+		if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
+			/* Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch.  (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64). */
+			wmb();
+			/* Hardware increments by 16 bytes, but packet split
+			 * descriptors are 32 bytes...so we increment tail
+			 * twice as much.
+			 */
+			E1000_WRITE_REG(&adapter->hw, RDT, i<<1);
+		}
+
+		if(unlikely(++i == rx_ring->count)) i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+		ps_page = &rx_ring->ps_page[i];
+		ps_page_dma = &rx_ring->ps_page_dma[i];
+	}
+
+no_buffers:
+	rx_ring->next_to_use = i;
+}
+
+/**
  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
  * @adapter:
  **/
@@ -2986,6 +3530,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		rctl |= E1000_RCTL_VFE;
 		rctl &= ~E1000_RCTL_CFIEN;
 		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+		e1000_update_mng_vlan(adapter);
 	} else {
 		/* disable VLAN tag insert/strip */
 		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
@@ -2996,6 +3541,10 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		rctl = E1000_READ_REG(&adapter->hw, RCTL);
 		rctl &= ~E1000_RCTL_VFE;
 		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+		if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
+			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+		}
 	}
 
 	e1000_irq_enable(adapter);
@@ -3006,7 +3555,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
 {
 	struct e1000_adapter *adapter = netdev->priv;
 	uint32_t vfta, index;
-
+	if((adapter->hw.mng_cookie.status &
+		E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+		(vid == adapter->mng_vlan_id))
+		return;
 	/* add VID to filter table */
 	index = (vid >> 5) & 0x7F;
 	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
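The VLAN filter table math above treats the VFTA as 128 32-bit entries: bits 11:5 of the VID select the dword and bits 4:0 select the bit within it. A worked example for VID 100:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int vid = 100;
    	unsigned int index = (vid >> 5) & 0x7F;	/* dword 3       */
    	unsigned int bit   = vid & 0x1F;	/* bit 4 -> 0x10 */

    	printf("VFTA[%u] |= 0x%08x\n", index, 1u << bit);
    	return 0;
    }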
@@ -3027,6 +3579,10 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
 
 	e1000_irq_enable(adapter);
 
+	if((adapter->hw.mng_cookie.status &
+		E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+		(vid == adapter->mng_vlan_id))
+		return;
 	/* remove VID from filter table */
 	index = (vid >> 5) & 0x7F;
 	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
@@ -3102,7 +3658,7 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev->priv;
-	uint32_t ctrl, ctrl_ext, rctl, manc, status;
+	uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
 	uint32_t wufc = adapter->wol;
 
 	netif_device_detach(netdev);
@@ -3144,6 +3700,9 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
 			E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
 		}
 
+		/* Allow time for pending master requests to run */
+		e1000_disable_pciex_master(&adapter->hw);
+
 		E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
 		E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
 		pci_enable_wake(pdev, 3, 1);
@@ -3168,6 +3727,16 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
 		}
 	}
 
+	switch(adapter->hw.mac_type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+
 	pci_disable_device(pdev);
 
 	state = (state > 0) ? 3 : 0;
@@ -3182,7 +3751,7 @@ e1000_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev->priv;
-	uint32_t manc, ret;
+	uint32_t manc, ret, swsm;
 
 	pci_set_power_state(pdev, 0);
 	pci_restore_state(pdev);
@@ -3207,10 +3776,19 @@ e1000_resume(struct pci_dev *pdev)
 		E1000_WRITE_REG(&adapter->hw, MANC, manc);
 	}
 
+	switch(adapter->hw.mac_type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+
 	return 0;
 }
 #endif
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
 * Polling 'interrupt' - used by things like netconsole to send skbs