diff options
author | Jesse Brandeburg <jesse.brandeburg@intel.com> | 2006-01-18 16:01:39 -0500 |
---|---|---|
committer | Jeff Garzik <jgarzik@pobox.com> | 2006-01-18 16:17:58 -0500 |
commit | 96838a40f02950f4ff501f62a7e59ac4d508e8b7 (patch) | |
tree | 9a86ecbe58a1d63159fc6c8c10223efa0344586d /drivers/net/e1000/e1000_main.c | |
parent | 6150f038158ad8ad4b74d6b76a67e2f68fd1d8e2 (diff) |
[PATCH] e1000: Fix whitespace
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r-- | drivers/net/e1000/e1000_main.c | 496 |
1 file changed, 249 insertions, 247 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 501f5108254e..44149f902868 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -295,7 +295,7 @@ e1000_irq_disable(struct e1000_adapter *adapter) | |||
295 | static inline void | 295 | static inline void |
296 | e1000_irq_enable(struct e1000_adapter *adapter) | 296 | e1000_irq_enable(struct e1000_adapter *adapter) |
297 | { | 297 | { |
298 | if(likely(atomic_dec_and_test(&adapter->irq_sem))) { | 298 | if (likely(atomic_dec_and_test(&adapter->irq_sem))) { |
299 | E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); | 299 | E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); |
300 | E1000_WRITE_FLUSH(&adapter->hw); | 300 | E1000_WRITE_FLUSH(&adapter->hw); |
301 | } | 301 | } |
@@ -307,17 +307,17 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) | |||
307 | struct net_device *netdev = adapter->netdev; | 307 | struct net_device *netdev = adapter->netdev; |
308 | uint16_t vid = adapter->hw.mng_cookie.vlan_id; | 308 | uint16_t vid = adapter->hw.mng_cookie.vlan_id; |
309 | uint16_t old_vid = adapter->mng_vlan_id; | 309 | uint16_t old_vid = adapter->mng_vlan_id; |
310 | if(adapter->vlgrp) { | 310 | if (adapter->vlgrp) { |
311 | if(!adapter->vlgrp->vlan_devices[vid]) { | 311 | if (!adapter->vlgrp->vlan_devices[vid]) { |
312 | if(adapter->hw.mng_cookie.status & | 312 | if (adapter->hw.mng_cookie.status & |
313 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { | 313 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { |
314 | e1000_vlan_rx_add_vid(netdev, vid); | 314 | e1000_vlan_rx_add_vid(netdev, vid); |
315 | adapter->mng_vlan_id = vid; | 315 | adapter->mng_vlan_id = vid; |
316 | } else | 316 | } else |
317 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 317 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
318 | 318 | ||
319 | if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && | 319 | if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && |
320 | (vid != old_vid) && | 320 | (vid != old_vid) && |
321 | !adapter->vlgrp->vlan_devices[old_vid]) | 321 | !adapter->vlgrp->vlan_devices[old_vid]) |
322 | e1000_vlan_rx_kill_vid(netdev, old_vid); | 322 | e1000_vlan_rx_kill_vid(netdev, old_vid); |
323 | } | 323 | } |
@@ -401,10 +401,10 @@ e1000_up(struct e1000_adapter *adapter) | |||
401 | /* hardware has been reset, we need to reload some things */ | 401 | /* hardware has been reset, we need to reload some things */ |
402 | 402 | ||
403 | /* Reset the PHY if it was previously powered down */ | 403 | /* Reset the PHY if it was previously powered down */ |
404 | if(adapter->hw.media_type == e1000_media_type_copper) { | 404 | if (adapter->hw.media_type == e1000_media_type_copper) { |
405 | uint16_t mii_reg; | 405 | uint16_t mii_reg; |
406 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | 406 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); |
407 | if(mii_reg & MII_CR_POWER_DOWN) | 407 | if (mii_reg & MII_CR_POWER_DOWN) |
408 | e1000_phy_reset(&adapter->hw); | 408 | e1000_phy_reset(&adapter->hw); |
409 | } | 409 | } |
410 | 410 | ||
@@ -425,16 +425,16 @@ e1000_up(struct e1000_adapter *adapter) | |||
425 | } | 425 | } |
426 | 426 | ||
427 | #ifdef CONFIG_PCI_MSI | 427 | #ifdef CONFIG_PCI_MSI |
428 | if(adapter->hw.mac_type > e1000_82547_rev_2) { | 428 | if (adapter->hw.mac_type > e1000_82547_rev_2) { |
429 | adapter->have_msi = TRUE; | 429 | adapter->have_msi = TRUE; |
430 | if((err = pci_enable_msi(adapter->pdev))) { | 430 | if ((err = pci_enable_msi(adapter->pdev))) { |
431 | DPRINTK(PROBE, ERR, | 431 | DPRINTK(PROBE, ERR, |
432 | "Unable to allocate MSI interrupt Error: %d\n", err); | 432 | "Unable to allocate MSI interrupt Error: %d\n", err); |
433 | adapter->have_msi = FALSE; | 433 | adapter->have_msi = FALSE; |
434 | } | 434 | } |
435 | } | 435 | } |
436 | #endif | 436 | #endif |
437 | if((err = request_irq(adapter->pdev->irq, &e1000_intr, | 437 | if ((err = request_irq(adapter->pdev->irq, &e1000_intr, |
438 | SA_SHIRQ | SA_SAMPLE_RANDOM, | 438 | SA_SHIRQ | SA_SAMPLE_RANDOM, |
439 | netdev->name, netdev))) { | 439 | netdev->name, netdev))) { |
440 | DPRINTK(PROBE, ERR, | 440 | DPRINTK(PROBE, ERR, |
@@ -471,7 +471,7 @@ e1000_down(struct e1000_adapter *adapter) | |||
471 | #endif | 471 | #endif |
472 | free_irq(adapter->pdev->irq, netdev); | 472 | free_irq(adapter->pdev->irq, netdev); |
473 | #ifdef CONFIG_PCI_MSI | 473 | #ifdef CONFIG_PCI_MSI |
474 | if(adapter->hw.mac_type > e1000_82547_rev_2 && | 474 | if (adapter->hw.mac_type > e1000_82547_rev_2 && |
475 | adapter->have_msi == TRUE) | 475 | adapter->have_msi == TRUE) |
476 | pci_disable_msi(adapter->pdev); | 476 | pci_disable_msi(adapter->pdev); |
477 | #endif | 477 | #endif |
@@ -537,12 +537,12 @@ e1000_reset(struct e1000_adapter *adapter) | |||
537 | break; | 537 | break; |
538 | } | 538 | } |
539 | 539 | ||
540 | if((adapter->hw.mac_type != e1000_82573) && | 540 | if ((adapter->hw.mac_type != e1000_82573) && |
541 | (adapter->netdev->mtu > E1000_RXBUFFER_8192)) | 541 | (adapter->netdev->mtu > E1000_RXBUFFER_8192)) |
542 | pba -= 8; /* allocate more FIFO for Tx */ | 542 | pba -= 8; /* allocate more FIFO for Tx */ |
543 | 543 | ||
544 | 544 | ||
545 | if(adapter->hw.mac_type == e1000_82547) { | 545 | if (adapter->hw.mac_type == e1000_82547) { |
546 | adapter->tx_fifo_head = 0; | 546 | adapter->tx_fifo_head = 0; |
547 | adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; | 547 | adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; |
548 | adapter->tx_fifo_size = | 548 | adapter->tx_fifo_size = |
@@ -565,9 +565,9 @@ e1000_reset(struct e1000_adapter *adapter) | |||
565 | 565 | ||
566 | /* Allow time for pending master requests to run */ | 566 | /* Allow time for pending master requests to run */ |
567 | e1000_reset_hw(&adapter->hw); | 567 | e1000_reset_hw(&adapter->hw); |
568 | if(adapter->hw.mac_type >= e1000_82544) | 568 | if (adapter->hw.mac_type >= e1000_82544) |
569 | E1000_WRITE_REG(&adapter->hw, WUC, 0); | 569 | E1000_WRITE_REG(&adapter->hw, WUC, 0); |
570 | if(e1000_init_hw(&adapter->hw)) | 570 | if (e1000_init_hw(&adapter->hw)) |
571 | DPRINTK(PROBE, ERR, "Hardware Error\n"); | 571 | DPRINTK(PROBE, ERR, "Hardware Error\n"); |
572 | e1000_update_mng_vlan(adapter); | 572 | e1000_update_mng_vlan(adapter); |
573 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ | 573 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ |
@@ -606,26 +606,26 @@ e1000_probe(struct pci_dev *pdev, | |||
606 | int i, err, pci_using_dac; | 606 | int i, err, pci_using_dac; |
607 | uint16_t eeprom_data; | 607 | uint16_t eeprom_data; |
608 | uint16_t eeprom_apme_mask = E1000_EEPROM_APME; | 608 | uint16_t eeprom_apme_mask = E1000_EEPROM_APME; |
609 | if((err = pci_enable_device(pdev))) | 609 | if ((err = pci_enable_device(pdev))) |
610 | return err; | 610 | return err; |
611 | 611 | ||
612 | if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { | 612 | if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { |
613 | pci_using_dac = 1; | 613 | pci_using_dac = 1; |
614 | } else { | 614 | } else { |
615 | if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { | 615 | if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { |
616 | E1000_ERR("No usable DMA configuration, aborting\n"); | 616 | E1000_ERR("No usable DMA configuration, aborting\n"); |
617 | return err; | 617 | return err; |
618 | } | 618 | } |
619 | pci_using_dac = 0; | 619 | pci_using_dac = 0; |
620 | } | 620 | } |
621 | 621 | ||
622 | if((err = pci_request_regions(pdev, e1000_driver_name))) | 622 | if ((err = pci_request_regions(pdev, e1000_driver_name))) |
623 | return err; | 623 | return err; |
624 | 624 | ||
625 | pci_set_master(pdev); | 625 | pci_set_master(pdev); |
626 | 626 | ||
627 | netdev = alloc_etherdev(sizeof(struct e1000_adapter)); | 627 | netdev = alloc_etherdev(sizeof(struct e1000_adapter)); |
628 | if(!netdev) { | 628 | if (!netdev) { |
629 | err = -ENOMEM; | 629 | err = -ENOMEM; |
630 | goto err_alloc_etherdev; | 630 | goto err_alloc_etherdev; |
631 | } | 631 | } |
@@ -644,15 +644,15 @@ e1000_probe(struct pci_dev *pdev, | |||
644 | mmio_len = pci_resource_len(pdev, BAR_0); | 644 | mmio_len = pci_resource_len(pdev, BAR_0); |
645 | 645 | ||
646 | adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); | 646 | adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); |
647 | if(!adapter->hw.hw_addr) { | 647 | if (!adapter->hw.hw_addr) { |
648 | err = -EIO; | 648 | err = -EIO; |
649 | goto err_ioremap; | 649 | goto err_ioremap; |
650 | } | 650 | } |
651 | 651 | ||
652 | for(i = BAR_1; i <= BAR_5; i++) { | 652 | for (i = BAR_1; i <= BAR_5; i++) { |
653 | if(pci_resource_len(pdev, i) == 0) | 653 | if (pci_resource_len(pdev, i) == 0) |
654 | continue; | 654 | continue; |
655 | if(pci_resource_flags(pdev, i) & IORESOURCE_IO) { | 655 | if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { |
656 | adapter->hw.io_base = pci_resource_start(pdev, i); | 656 | adapter->hw.io_base = pci_resource_start(pdev, i); |
657 | break; | 657 | break; |
658 | } | 658 | } |
@@ -689,13 +689,13 @@ e1000_probe(struct pci_dev *pdev, | |||
689 | 689 | ||
690 | /* setup the private structure */ | 690 | /* setup the private structure */ |
691 | 691 | ||
692 | if((err = e1000_sw_init(adapter))) | 692 | if ((err = e1000_sw_init(adapter))) |
693 | goto err_sw_init; | 693 | goto err_sw_init; |
694 | 694 | ||
695 | if((err = e1000_check_phy_reset_block(&adapter->hw))) | 695 | if ((err = e1000_check_phy_reset_block(&adapter->hw))) |
696 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); | 696 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); |
697 | 697 | ||
698 | if(adapter->hw.mac_type >= e1000_82543) { | 698 | if (adapter->hw.mac_type >= e1000_82543) { |
699 | netdev->features = NETIF_F_SG | | 699 | netdev->features = NETIF_F_SG | |
700 | NETIF_F_HW_CSUM | | 700 | NETIF_F_HW_CSUM | |
701 | NETIF_F_HW_VLAN_TX | | 701 | NETIF_F_HW_VLAN_TX | |
@@ -704,16 +704,16 @@ e1000_probe(struct pci_dev *pdev, | |||
704 | } | 704 | } |
705 | 705 | ||
706 | #ifdef NETIF_F_TSO | 706 | #ifdef NETIF_F_TSO |
707 | if((adapter->hw.mac_type >= e1000_82544) && | 707 | if ((adapter->hw.mac_type >= e1000_82544) && |
708 | (adapter->hw.mac_type != e1000_82547)) | 708 | (adapter->hw.mac_type != e1000_82547)) |
709 | netdev->features |= NETIF_F_TSO; | 709 | netdev->features |= NETIF_F_TSO; |
710 | 710 | ||
711 | #ifdef NETIF_F_TSO_IPV6 | 711 | #ifdef NETIF_F_TSO_IPV6 |
712 | if(adapter->hw.mac_type > e1000_82547_rev_2) | 712 | if (adapter->hw.mac_type > e1000_82547_rev_2) |
713 | netdev->features |= NETIF_F_TSO_IPV6; | 713 | netdev->features |= NETIF_F_TSO_IPV6; |
714 | #endif | 714 | #endif |
715 | #endif | 715 | #endif |
716 | if(pci_using_dac) | 716 | if (pci_using_dac) |
717 | netdev->features |= NETIF_F_HIGHDMA; | 717 | netdev->features |= NETIF_F_HIGHDMA; |
718 | 718 | ||
719 | /* hard_start_xmit is safe against parallel locking */ | 719 | /* hard_start_xmit is safe against parallel locking */ |
@@ -721,14 +721,14 @@ e1000_probe(struct pci_dev *pdev, | |||
721 | 721 | ||
722 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); | 722 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); |
723 | 723 | ||
724 | /* before reading the EEPROM, reset the controller to | 724 | /* before reading the EEPROM, reset the controller to |
725 | * put the device in a known good starting state */ | 725 | * put the device in a known good starting state */ |
726 | 726 | ||
727 | e1000_reset_hw(&adapter->hw); | 727 | e1000_reset_hw(&adapter->hw); |
728 | 728 | ||
729 | /* make sure the EEPROM is good */ | 729 | /* make sure the EEPROM is good */ |
730 | 730 | ||
731 | if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) { | 731 | if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) { |
732 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); | 732 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); |
733 | err = -EIO; | 733 | err = -EIO; |
734 | goto err_eeprom; | 734 | goto err_eeprom; |
@@ -736,12 +736,12 @@ e1000_probe(struct pci_dev *pdev, | |||
736 | 736 | ||
737 | /* copy the MAC address out of the EEPROM */ | 737 | /* copy the MAC address out of the EEPROM */ |
738 | 738 | ||
739 | if(e1000_read_mac_addr(&adapter->hw)) | 739 | if (e1000_read_mac_addr(&adapter->hw)) |
740 | DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); | 740 | DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); |
741 | memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); | 741 | memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); |
742 | memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); | 742 | memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); |
743 | 743 | ||
744 | if(!is_valid_ether_addr(netdev->perm_addr)) { | 744 | if (!is_valid_ether_addr(netdev->perm_addr)) { |
745 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); | 745 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); |
746 | err = -EIO; | 746 | err = -EIO; |
747 | goto err_eeprom; | 747 | goto err_eeprom; |
@@ -781,7 +781,7 @@ e1000_probe(struct pci_dev *pdev, | |||
781 | * enable the ACPI Magic Packet filter | 781 | * enable the ACPI Magic Packet filter |
782 | */ | 782 | */ |
783 | 783 | ||
784 | switch(adapter->hw.mac_type) { | 784 | switch (adapter->hw.mac_type) { |
785 | case e1000_82542_rev2_0: | 785 | case e1000_82542_rev2_0: |
786 | case e1000_82542_rev2_1: | 786 | case e1000_82542_rev2_1: |
787 | case e1000_82543: | 787 | case e1000_82543: |
@@ -794,7 +794,7 @@ e1000_probe(struct pci_dev *pdev, | |||
794 | case e1000_82546: | 794 | case e1000_82546: |
795 | case e1000_82546_rev_3: | 795 | case e1000_82546_rev_3: |
796 | case e1000_82571: | 796 | case e1000_82571: |
797 | if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ | 797 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ |
798 | e1000_read_eeprom(&adapter->hw, | 798 | e1000_read_eeprom(&adapter->hw, |
799 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | 799 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); |
800 | break; | 800 | break; |
@@ -805,7 +805,7 @@ e1000_probe(struct pci_dev *pdev, | |||
805 | EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); | 805 | EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); |
806 | break; | 806 | break; |
807 | } | 807 | } |
808 | if(eeprom_data & eeprom_apme_mask) | 808 | if (eeprom_data & eeprom_apme_mask) |
809 | adapter->wol |= E1000_WUFC_MAG; | 809 | adapter->wol |= E1000_WUFC_MAG; |
810 | 810 | ||
811 | /* print bus type/speed/width info */ | 811 | /* print bus type/speed/width info */ |
@@ -840,7 +840,7 @@ e1000_probe(struct pci_dev *pdev, | |||
840 | e1000_get_hw_control(adapter); | 840 | e1000_get_hw_control(adapter); |
841 | 841 | ||
842 | strcpy(netdev->name, "eth%d"); | 842 | strcpy(netdev->name, "eth%d"); |
843 | if((err = register_netdev(netdev))) | 843 | if ((err = register_netdev(netdev))) |
844 | goto err_register; | 844 | goto err_register; |
845 | 845 | ||
846 | DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); | 846 | DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); |
@@ -881,10 +881,10 @@ e1000_remove(struct pci_dev *pdev) | |||
881 | 881 | ||
882 | flush_scheduled_work(); | 882 | flush_scheduled_work(); |
883 | 883 | ||
884 | if(adapter->hw.mac_type >= e1000_82540 && | 884 | if (adapter->hw.mac_type >= e1000_82540 && |
885 | adapter->hw.media_type == e1000_media_type_copper) { | 885 | adapter->hw.media_type == e1000_media_type_copper) { |
886 | manc = E1000_READ_REG(&adapter->hw, MANC); | 886 | manc = E1000_READ_REG(&adapter->hw, MANC); |
887 | if(manc & E1000_MANC_SMBUS_EN) { | 887 | if (manc & E1000_MANC_SMBUS_EN) { |
888 | manc |= E1000_MANC_ARP_EN; | 888 | manc |= E1000_MANC_ARP_EN; |
889 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 889 | E1000_WRITE_REG(&adapter->hw, MANC, manc); |
890 | } | 890 | } |
@@ -900,7 +900,7 @@ e1000_remove(struct pci_dev *pdev) | |||
900 | __dev_put(&adapter->polling_netdev[i]); | 900 | __dev_put(&adapter->polling_netdev[i]); |
901 | #endif | 901 | #endif |
902 | 902 | ||
903 | if(!e1000_check_phy_reset_block(&adapter->hw)) | 903 | if (!e1000_check_phy_reset_block(&adapter->hw)) |
904 | e1000_phy_hw_reset(&adapter->hw); | 904 | e1000_phy_hw_reset(&adapter->hw); |
905 | 905 | ||
906 | kfree(adapter->tx_ring); | 906 | kfree(adapter->tx_ring); |
@@ -959,19 +959,19 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
959 | 959 | ||
960 | /* identify the MAC */ | 960 | /* identify the MAC */ |
961 | 961 | ||
962 | if(e1000_set_mac_type(hw)) { | 962 | if (e1000_set_mac_type(hw)) { |
963 | DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); | 963 | DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); |
964 | return -EIO; | 964 | return -EIO; |
965 | } | 965 | } |
966 | 966 | ||
967 | /* initialize eeprom parameters */ | 967 | /* initialize eeprom parameters */ |
968 | 968 | ||
969 | if(e1000_init_eeprom_params(hw)) { | 969 | if (e1000_init_eeprom_params(hw)) { |
970 | E1000_ERR("EEPROM initialization failed\n"); | 970 | E1000_ERR("EEPROM initialization failed\n"); |
971 | return -EIO; | 971 | return -EIO; |
972 | } | 972 | } |
973 | 973 | ||
974 | switch(hw->mac_type) { | 974 | switch (hw->mac_type) { |
975 | default: | 975 | default: |
976 | break; | 976 | break; |
977 | case e1000_82541: | 977 | case e1000_82541: |
@@ -990,7 +990,7 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
990 | 990 | ||
991 | /* Copper options */ | 991 | /* Copper options */ |
992 | 992 | ||
993 | if(hw->media_type == e1000_media_type_copper) { | 993 | if (hw->media_type == e1000_media_type_copper) { |
994 | hw->mdix = AUTO_ALL_MODES; | 994 | hw->mdix = AUTO_ALL_MODES; |
995 | hw->disable_polarity_correction = FALSE; | 995 | hw->disable_polarity_correction = FALSE; |
996 | hw->master_slave = E1000_MASTER_SLAVE; | 996 | hw->master_slave = E1000_MASTER_SLAVE; |
@@ -1166,10 +1166,10 @@ e1000_open(struct net_device *netdev) | |||
1166 | if ((err = e1000_setup_all_rx_resources(adapter))) | 1166 | if ((err = e1000_setup_all_rx_resources(adapter))) |
1167 | goto err_setup_rx; | 1167 | goto err_setup_rx; |
1168 | 1168 | ||
1169 | if((err = e1000_up(adapter))) | 1169 | if ((err = e1000_up(adapter))) |
1170 | goto err_up; | 1170 | goto err_up; |
1171 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 1171 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
1172 | if((adapter->hw.mng_cookie.status & | 1172 | if ((adapter->hw.mng_cookie.status & |
1173 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { | 1173 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { |
1174 | e1000_update_mng_vlan(adapter); | 1174 | e1000_update_mng_vlan(adapter); |
1175 | } | 1175 | } |
@@ -1214,7 +1214,7 @@ e1000_close(struct net_device *netdev) | |||
1214 | e1000_free_all_tx_resources(adapter); | 1214 | e1000_free_all_tx_resources(adapter); |
1215 | e1000_free_all_rx_resources(adapter); | 1215 | e1000_free_all_rx_resources(adapter); |
1216 | 1216 | ||
1217 | if((adapter->hw.mng_cookie.status & | 1217 | if ((adapter->hw.mng_cookie.status & |
1218 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { | 1218 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { |
1219 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | 1219 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); |
1220 | } | 1220 | } |
@@ -1269,7 +1269,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter, | |||
1269 | size = sizeof(struct e1000_buffer) * txdr->count; | 1269 | size = sizeof(struct e1000_buffer) * txdr->count; |
1270 | 1270 | ||
1271 | txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); | 1271 | txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); |
1272 | if(!txdr->buffer_info) { | 1272 | if (!txdr->buffer_info) { |
1273 | DPRINTK(PROBE, ERR, | 1273 | DPRINTK(PROBE, ERR, |
1274 | "Unable to allocate memory for the transmit descriptor ring\n"); | 1274 | "Unable to allocate memory for the transmit descriptor ring\n"); |
1275 | return -ENOMEM; | 1275 | return -ENOMEM; |
@@ -1282,7 +1282,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter, | |||
1282 | E1000_ROUNDUP(txdr->size, 4096); | 1282 | E1000_ROUNDUP(txdr->size, 4096); |
1283 | 1283 | ||
1284 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); | 1284 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); |
1285 | if(!txdr->desc) { | 1285 | if (!txdr->desc) { |
1286 | setup_tx_desc_die: | 1286 | setup_tx_desc_die: |
1287 | vfree(txdr->buffer_info); | 1287 | vfree(txdr->buffer_info); |
1288 | DPRINTK(PROBE, ERR, | 1288 | DPRINTK(PROBE, ERR, |
@@ -1298,8 +1298,8 @@ setup_tx_desc_die: | |||
1298 | "at %p\n", txdr->size, txdr->desc); | 1298 | "at %p\n", txdr->size, txdr->desc); |
1299 | /* Try again, without freeing the previous */ | 1299 | /* Try again, without freeing the previous */ |
1300 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); | 1300 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); |
1301 | if(!txdr->desc) { | ||
1302 | /* Failed allocation, critical failure */ | 1301 | /* Failed allocation, critical failure */ |
1302 | if (!txdr->desc) { | ||
1303 | pci_free_consistent(pdev, txdr->size, olddesc, olddma); | 1303 | pci_free_consistent(pdev, txdr->size, olddesc, olddma); |
1304 | goto setup_tx_desc_die; | 1304 | goto setup_tx_desc_die; |
1305 | } | 1305 | } |
@@ -1499,7 +1499,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
1499 | 1499 | ||
1500 | size = sizeof(struct e1000_ps_page) * rxdr->count; | 1500 | size = sizeof(struct e1000_ps_page) * rxdr->count; |
1501 | rxdr->ps_page = kmalloc(size, GFP_KERNEL); | 1501 | rxdr->ps_page = kmalloc(size, GFP_KERNEL); |
1502 | if(!rxdr->ps_page) { | 1502 | if (!rxdr->ps_page) { |
1503 | vfree(rxdr->buffer_info); | 1503 | vfree(rxdr->buffer_info); |
1504 | DPRINTK(PROBE, ERR, | 1504 | DPRINTK(PROBE, ERR, |
1505 | "Unable to allocate memory for the receive descriptor ring\n"); | 1505 | "Unable to allocate memory for the receive descriptor ring\n"); |
@@ -1509,7 +1509,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
1509 | 1509 | ||
1510 | size = sizeof(struct e1000_ps_page_dma) * rxdr->count; | 1510 | size = sizeof(struct e1000_ps_page_dma) * rxdr->count; |
1511 | rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); | 1511 | rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); |
1512 | if(!rxdr->ps_page_dma) { | 1512 | if (!rxdr->ps_page_dma) { |
1513 | vfree(rxdr->buffer_info); | 1513 | vfree(rxdr->buffer_info); |
1514 | kfree(rxdr->ps_page); | 1514 | kfree(rxdr->ps_page); |
1515 | DPRINTK(PROBE, ERR, | 1515 | DPRINTK(PROBE, ERR, |
@@ -1518,7 +1518,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
1518 | } | 1518 | } |
1519 | memset(rxdr->ps_page_dma, 0, size); | 1519 | memset(rxdr->ps_page_dma, 0, size); |
1520 | 1520 | ||
1521 | if(adapter->hw.mac_type <= e1000_82547_rev_2) | 1521 | if (adapter->hw.mac_type <= e1000_82547_rev_2) |
1522 | desc_len = sizeof(struct e1000_rx_desc); | 1522 | desc_len = sizeof(struct e1000_rx_desc); |
1523 | else | 1523 | else |
1524 | desc_len = sizeof(union e1000_rx_desc_packet_split); | 1524 | desc_len = sizeof(union e1000_rx_desc_packet_split); |
@@ -1647,7 +1647,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1647 | rctl |= E1000_RCTL_LPE; | 1647 | rctl |= E1000_RCTL_LPE; |
1648 | 1648 | ||
1649 | /* Setup buffer sizes */ | 1649 | /* Setup buffer sizes */ |
1650 | if(adapter->hw.mac_type >= e1000_82571) { | 1650 | if (adapter->hw.mac_type >= e1000_82571) { |
1651 | /* We can now specify buffers in 1K increments. | 1651 | /* We can now specify buffers in 1K increments. |
1652 | * BSIZE and BSEX are ignored in this case. */ | 1652 | * BSIZE and BSEX are ignored in this case. */ |
1653 | rctl |= adapter->rx_buffer_len << 0x11; | 1653 | rctl |= adapter->rx_buffer_len << 0x11; |
@@ -1681,7 +1681,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1681 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); | 1681 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); |
1682 | 1682 | ||
1683 | rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; | 1683 | rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; |
1684 | 1684 | ||
1685 | psrctl |= adapter->rx_ps_bsize0 >> | 1685 | psrctl |= adapter->rx_ps_bsize0 >> |
1686 | E1000_PSRCTL_BSIZE0_SHIFT; | 1686 | E1000_PSRCTL_BSIZE0_SHIFT; |
1687 | 1687 | ||
@@ -1743,7 +1743,7 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1743 | 1743 | ||
1744 | if (hw->mac_type >= e1000_82540) { | 1744 | if (hw->mac_type >= e1000_82540) { |
1745 | E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); | 1745 | E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); |
1746 | if(adapter->itr > 1) | 1746 | if (adapter->itr > 1) |
1747 | E1000_WRITE_REG(hw, ITR, | 1747 | E1000_WRITE_REG(hw, ITR, |
1748 | 1000000000 / (adapter->itr * 256)); | 1748 | 1000000000 / (adapter->itr * 256)); |
1749 | } | 1749 | } |
@@ -1832,13 +1832,13 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1832 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ | 1832 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ |
1833 | if (hw->mac_type >= e1000_82543) { | 1833 | if (hw->mac_type >= e1000_82543) { |
1834 | rxcsum = E1000_READ_REG(hw, RXCSUM); | 1834 | rxcsum = E1000_READ_REG(hw, RXCSUM); |
1835 | if(adapter->rx_csum == TRUE) { | 1835 | if (adapter->rx_csum == TRUE) { |
1836 | rxcsum |= E1000_RXCSUM_TUOFL; | 1836 | rxcsum |= E1000_RXCSUM_TUOFL; |
1837 | 1837 | ||
1838 | /* Enable 82571 IPv4 payload checksum for UDP fragments | 1838 | /* Enable 82571 IPv4 payload checksum for UDP fragments |
1839 | * Must be used in conjunction with packet-split. */ | 1839 | * Must be used in conjunction with packet-split. */ |
1840 | if ((hw->mac_type >= e1000_82571) && | 1840 | if ((hw->mac_type >= e1000_82571) && |
1841 | (adapter->rx_ps_pages)) { | 1841 | (adapter->rx_ps_pages)) { |
1842 | rxcsum |= E1000_RXCSUM_IPPCSE; | 1842 | rxcsum |= E1000_RXCSUM_IPPCSE; |
1843 | } | 1843 | } |
1844 | } else { | 1844 | } else { |
@@ -1900,7 +1900,7 @@ static inline void | |||
1900 | e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, | 1900 | e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, |
1901 | struct e1000_buffer *buffer_info) | 1901 | struct e1000_buffer *buffer_info) |
1902 | { | 1902 | { |
1903 | if(buffer_info->dma) { | 1903 | if (buffer_info->dma) { |
1904 | pci_unmap_page(adapter->pdev, | 1904 | pci_unmap_page(adapter->pdev, |
1905 | buffer_info->dma, | 1905 | buffer_info->dma, |
1906 | buffer_info->length, | 1906 | buffer_info->length, |
@@ -1927,7 +1927,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter, | |||
1927 | 1927 | ||
1928 | /* Free all the Tx ring sk_buffs */ | 1928 | /* Free all the Tx ring sk_buffs */ |
1929 | 1929 | ||
1930 | for(i = 0; i < tx_ring->count; i++) { | 1930 | for (i = 0; i < tx_ring->count; i++) { |
1931 | buffer_info = &tx_ring->buffer_info[i]; | 1931 | buffer_info = &tx_ring->buffer_info[i]; |
1932 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 1932 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
1933 | } | 1933 | } |
@@ -2023,10 +2023,9 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter, | |||
2023 | unsigned int i, j; | 2023 | unsigned int i, j; |
2024 | 2024 | ||
2025 | /* Free all the Rx ring sk_buffs */ | 2025 | /* Free all the Rx ring sk_buffs */ |
2026 | 2026 | for (i = 0; i < rx_ring->count; i++) { | |
2027 | for(i = 0; i < rx_ring->count; i++) { | ||
2028 | buffer_info = &rx_ring->buffer_info[i]; | 2027 | buffer_info = &rx_ring->buffer_info[i]; |
2029 | if(buffer_info->skb) { | 2028 | if (buffer_info->skb) { |
2030 | pci_unmap_single(pdev, | 2029 | pci_unmap_single(pdev, |
2031 | buffer_info->dma, | 2030 | buffer_info->dma, |
2032 | buffer_info->length, | 2031 | buffer_info->length, |
@@ -2107,7 +2106,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter) | |||
2107 | E1000_WRITE_FLUSH(&adapter->hw); | 2106 | E1000_WRITE_FLUSH(&adapter->hw); |
2108 | mdelay(5); | 2107 | mdelay(5); |
2109 | 2108 | ||
2110 | if(netif_running(netdev)) | 2109 | if (netif_running(netdev)) |
2111 | e1000_clean_all_rx_rings(adapter); | 2110 | e1000_clean_all_rx_rings(adapter); |
2112 | } | 2111 | } |
2113 | 2112 | ||
@@ -2123,10 +2122,10 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter) | |||
2123 | E1000_WRITE_FLUSH(&adapter->hw); | 2122 | E1000_WRITE_FLUSH(&adapter->hw); |
2124 | mdelay(5); | 2123 | mdelay(5); |
2125 | 2124 | ||
2126 | if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) | 2125 | if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) |
2127 | e1000_pci_set_mwi(&adapter->hw); | 2126 | e1000_pci_set_mwi(&adapter->hw); |
2128 | 2127 | ||
2129 | if(netif_running(netdev)) { | 2128 | if (netif_running(netdev)) { |
2130 | e1000_configure_rx(adapter); | 2129 | e1000_configure_rx(adapter); |
2131 | /* No need to loop, because 82542 supports only 1 queue */ | 2130 | /* No need to loop, because 82542 supports only 1 queue */ |
2132 | struct e1000_rx_ring *ring = &adapter->rx_ring[0]; | 2131 | struct e1000_rx_ring *ring = &adapter->rx_ring[0]; |
@@ -2148,12 +2147,12 @@ e1000_set_mac(struct net_device *netdev, void *p) | |||
2148 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2147 | struct e1000_adapter *adapter = netdev_priv(netdev); |
2149 | struct sockaddr *addr = p; | 2148 | struct sockaddr *addr = p; |
2150 | 2149 | ||
2151 | if(!is_valid_ether_addr(addr->sa_data)) | 2150 | if (!is_valid_ether_addr(addr->sa_data)) |
2152 | return -EADDRNOTAVAIL; | 2151 | return -EADDRNOTAVAIL; |
2153 | 2152 | ||
2154 | /* 82542 2.0 needs to be in reset to write receive address registers */ | 2153 | /* 82542 2.0 needs to be in reset to write receive address registers */ |
2155 | 2154 | ||
2156 | if(adapter->hw.mac_type == e1000_82542_rev2_0) | 2155 | if (adapter->hw.mac_type == e1000_82542_rev2_0) |
2157 | e1000_enter_82542_rst(adapter); | 2156 | e1000_enter_82542_rst(adapter); |
2158 | 2157 | ||
2159 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 2158 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
@@ -2167,17 +2166,17 @@ e1000_set_mac(struct net_device *netdev, void *p) | |||
2167 | /* activate the work around */ | 2166 | /* activate the work around */ |
2168 | adapter->hw.laa_is_present = 1; | 2167 | adapter->hw.laa_is_present = 1; |
2169 | 2168 | ||
2170 | /* Hold a copy of the LAA in RAR[14] This is done so that | 2169 | /* Hold a copy of the LAA in RAR[14] This is done so that |
2171 | * between the time RAR[0] gets clobbered and the time it | 2170 | * between the time RAR[0] gets clobbered and the time it |
2172 | * gets fixed (in e1000_watchdog), the actual LAA is in one | 2171 | * gets fixed (in e1000_watchdog), the actual LAA is in one |
2173 | * of the RARs and no incoming packets directed to this port | 2172 | * of the RARs and no incoming packets directed to this port |
2174 | * are dropped. Eventaully the LAA will be in RAR[0] and | 2173 | * are dropped. Eventaully the LAA will be in RAR[0] and |
2175 | * RAR[14] */ | 2174 | * RAR[14] */ |
2176 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, | 2175 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, |
2177 | E1000_RAR_ENTRIES - 1); | 2176 | E1000_RAR_ENTRIES - 1); |
2178 | } | 2177 | } |
2179 | 2178 | ||
2180 | if(adapter->hw.mac_type == e1000_82542_rev2_0) | 2179 | if (adapter->hw.mac_type == e1000_82542_rev2_0) |
2181 | e1000_leave_82542_rst(adapter); | 2180 | e1000_leave_82542_rst(adapter); |
2182 | 2181 | ||
2183 | return 0; | 2182 | return 0; |
@@ -2211,9 +2210,9 @@ e1000_set_multi(struct net_device *netdev) | |||
2211 | 2210 | ||
2212 | rctl = E1000_READ_REG(hw, RCTL); | 2211 | rctl = E1000_READ_REG(hw, RCTL); |
2213 | 2212 | ||
2214 | if(netdev->flags & IFF_PROMISC) { | 2213 | if (netdev->flags & IFF_PROMISC) { |
2215 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); | 2214 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); |
2216 | } else if(netdev->flags & IFF_ALLMULTI) { | 2215 | } else if (netdev->flags & IFF_ALLMULTI) { |
2217 | rctl |= E1000_RCTL_MPE; | 2216 | rctl |= E1000_RCTL_MPE; |
2218 | rctl &= ~E1000_RCTL_UPE; | 2217 | rctl &= ~E1000_RCTL_UPE; |
2219 | } else { | 2218 | } else { |
@@ -2224,7 +2223,7 @@ e1000_set_multi(struct net_device *netdev) | |||
2224 | 2223 | ||
2225 | /* 82542 2.0 needs to be in reset to write receive address registers */ | 2224 | /* 82542 2.0 needs to be in reset to write receive address registers */ |
2226 | 2225 | ||
2227 | if(hw->mac_type == e1000_82542_rev2_0) | 2226 | if (hw->mac_type == e1000_82542_rev2_0) |
2228 | e1000_enter_82542_rst(adapter); | 2227 | e1000_enter_82542_rst(adapter); |
2229 | 2228 | ||
2230 | /* load the first 14 multicast address into the exact filters 1-14 | 2229 | /* load the first 14 multicast address into the exact filters 1-14 |
@@ -2234,7 +2233,7 @@ e1000_set_multi(struct net_device *netdev) | |||
2234 | */ | 2233 | */ |
2235 | mc_ptr = netdev->mc_list; | 2234 | mc_ptr = netdev->mc_list; |
2236 | 2235 | ||
2237 | for(i = 1; i < rar_entries; i++) { | 2236 | for (i = 1; i < rar_entries; i++) { |
2238 | if (mc_ptr) { | 2237 | if (mc_ptr) { |
2239 | e1000_rar_set(hw, mc_ptr->dmi_addr, i); | 2238 | e1000_rar_set(hw, mc_ptr->dmi_addr, i); |
2240 | mc_ptr = mc_ptr->next; | 2239 | mc_ptr = mc_ptr->next; |
@@ -2246,17 +2245,17 @@ e1000_set_multi(struct net_device *netdev) | |||
2246 | 2245 | ||
2247 | /* clear the old settings from the multicast hash table */ | 2246 | /* clear the old settings from the multicast hash table */ |
2248 | 2247 | ||
2249 | for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) | 2248 | for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++) |
2250 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 2249 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
2251 | 2250 | ||
2252 | /* load any remaining addresses into the hash table */ | 2251 | /* load any remaining addresses into the hash table */ |
2253 | 2252 | ||
2254 | for(; mc_ptr; mc_ptr = mc_ptr->next) { | 2253 | for (; mc_ptr; mc_ptr = mc_ptr->next) { |
2255 | hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr); | 2254 | hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr); |
2256 | e1000_mta_set(hw, hash_value); | 2255 | e1000_mta_set(hw, hash_value); |
2257 | } | 2256 | } |
2258 | 2257 | ||
2259 | if(hw->mac_type == e1000_82542_rev2_0) | 2258 | if (hw->mac_type == e1000_82542_rev2_0) |
2260 | e1000_leave_82542_rst(adapter); | 2259 | e1000_leave_82542_rst(adapter); |
2261 | } | 2260 | } |
2262 | 2261 | ||
@@ -2282,8 +2281,8 @@ e1000_82547_tx_fifo_stall(unsigned long data) | |||
2282 | struct net_device *netdev = adapter->netdev; | 2281 | struct net_device *netdev = adapter->netdev; |
2283 | uint32_t tctl; | 2282 | uint32_t tctl; |
2284 | 2283 | ||
2285 | if(atomic_read(&adapter->tx_fifo_stall)) { | 2284 | if (atomic_read(&adapter->tx_fifo_stall)) { |
2286 | if((E1000_READ_REG(&adapter->hw, TDT) == | 2285 | if ((E1000_READ_REG(&adapter->hw, TDT) == |
2287 | E1000_READ_REG(&adapter->hw, TDH)) && | 2286 | E1000_READ_REG(&adapter->hw, TDH)) && |
2288 | (E1000_READ_REG(&adapter->hw, TDFT) == | 2287 | (E1000_READ_REG(&adapter->hw, TDFT) == |
2289 | E1000_READ_REG(&adapter->hw, TDFH)) && | 2288 | E1000_READ_REG(&adapter->hw, TDFH)) && |
@@ -2335,18 +2334,18 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2335 | e1000_check_for_link(&adapter->hw); | 2334 | e1000_check_for_link(&adapter->hw); |
2336 | if (adapter->hw.mac_type == e1000_82573) { | 2335 | if (adapter->hw.mac_type == e1000_82573) { |
2337 | e1000_enable_tx_pkt_filtering(&adapter->hw); | 2336 | e1000_enable_tx_pkt_filtering(&adapter->hw); |
2338 | if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) | 2337 | if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) |
2339 | e1000_update_mng_vlan(adapter); | 2338 | e1000_update_mng_vlan(adapter); |
2340 | } | 2339 | } |
2341 | 2340 | ||
2342 | if((adapter->hw.media_type == e1000_media_type_internal_serdes) && | 2341 | if ((adapter->hw.media_type == e1000_media_type_internal_serdes) && |
2343 | !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) | 2342 | !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) |
2344 | link = !adapter->hw.serdes_link_down; | 2343 | link = !adapter->hw.serdes_link_down; |
2345 | else | 2344 | else |
2346 | link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; | 2345 | link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; |
2347 | 2346 | ||
2348 | if(link) { | 2347 | if (link) { |
2349 | if(!netif_carrier_ok(netdev)) { | 2348 | if (!netif_carrier_ok(netdev)) { |
2350 | e1000_get_speed_and_duplex(&adapter->hw, | 2349 | e1000_get_speed_and_duplex(&adapter->hw, |
2351 | &adapter->link_speed, | 2350 | &adapter->link_speed, |
2352 | &adapter->link_duplex); | 2351 | &adapter->link_duplex); |
@@ -2377,7 +2376,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2377 | adapter->smartspeed = 0; | 2376 | adapter->smartspeed = 0; |
2378 | } | 2377 | } |
2379 | } else { | 2378 | } else { |
2380 | if(netif_carrier_ok(netdev)) { | 2379 | if (netif_carrier_ok(netdev)) { |
2381 | adapter->link_speed = 0; | 2380 | adapter->link_speed = 0; |
2382 | adapter->link_duplex = 0; | 2381 | adapter->link_duplex = 0; |
2383 | DPRINTK(LINK, INFO, "NIC Link is Down\n"); | 2382 | DPRINTK(LINK, INFO, "NIC Link is Down\n"); |
@@ -2417,12 +2416,12 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2417 | } | 2416 | } |
2418 | 2417 | ||
2419 | /* Dynamic mode for Interrupt Throttle Rate (ITR) */ | 2418 | /* Dynamic mode for Interrupt Throttle Rate (ITR) */ |
2420 | if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { | 2419 | if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { |
2421 | /* Symmetric Tx/Rx gets a reduced ITR=2000; Total | 2420 | /* Symmetric Tx/Rx gets a reduced ITR=2000; Total |
2422 | * asymmetrical Tx or Rx gets ITR=8000; everyone | 2421 | * asymmetrical Tx or Rx gets ITR=8000; everyone |
2423 | * else is between 2000-8000. */ | 2422 | * else is between 2000-8000. */ |
2424 | uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; | 2423 | uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; |
2425 | uint32_t dif = (adapter->gotcl > adapter->gorcl ? | 2424 | uint32_t dif = (adapter->gotcl > adapter->gorcl ? |
2426 | adapter->gotcl - adapter->gorcl : | 2425 | adapter->gotcl - adapter->gorcl : |
2427 | adapter->gorcl - adapter->gotcl) / 10000; | 2426 | adapter->gorcl - adapter->gotcl) / 10000; |
2428 | uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; | 2427 | uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; |
@@ -2435,7 +2434,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2435 | /* Force detection of hung controller every watchdog period */ | 2434 | /* Force detection of hung controller every watchdog period */ |
2436 | adapter->detect_tx_hung = TRUE; | 2435 | adapter->detect_tx_hung = TRUE; |
2437 | 2436 | ||
2438 | /* With 82571 controllers, LAA may be overwritten due to controller | 2437 | /* With 82571 controllers, LAA may be overwritten due to controller |
2439 | * reset from the other port. Set the appropriate LAA in RAR[0] */ | 2438 | * reset from the other port. Set the appropriate LAA in RAR[0] */ |
2440 | if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) | 2439 | if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) |
2441 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); | 2440 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); |
@@ -2464,7 +2463,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2464 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; | 2463 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; |
2465 | int err; | 2464 | int err; |
2466 | 2465 | ||
2467 | if(skb_shinfo(skb)->tso_size) { | 2466 | if (skb_shinfo(skb)->tso_size) { |
2468 | if (skb_header_cloned(skb)) { | 2467 | if (skb_header_cloned(skb)) { |
2469 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 2468 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
2470 | if (err) | 2469 | if (err) |
@@ -2473,7 +2472,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2473 | 2472 | ||
2474 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 2473 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); |
2475 | mss = skb_shinfo(skb)->tso_size; | 2474 | mss = skb_shinfo(skb)->tso_size; |
2476 | if(skb->protocol == ntohs(ETH_P_IP)) { | 2475 | if (skb->protocol == ntohs(ETH_P_IP)) { |
2477 | skb->nh.iph->tot_len = 0; | 2476 | skb->nh.iph->tot_len = 0; |
2478 | skb->nh.iph->check = 0; | 2477 | skb->nh.iph->check = 0; |
2479 | skb->h.th->check = | 2478 | skb->h.th->check = |
@@ -2485,7 +2484,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2485 | cmd_length = E1000_TXD_CMD_IP; | 2484 | cmd_length = E1000_TXD_CMD_IP; |
2486 | ipcse = skb->h.raw - skb->data - 1; | 2485 | ipcse = skb->h.raw - skb->data - 1; |
2487 | #ifdef NETIF_F_TSO_IPV6 | 2486 | #ifdef NETIF_F_TSO_IPV6 |
2488 | } else if(skb->protocol == ntohs(ETH_P_IPV6)) { | 2487 | } else if (skb->protocol == ntohs(ETH_P_IPV6)) { |
2489 | skb->nh.ipv6h->payload_len = 0; | 2488 | skb->nh.ipv6h->payload_len = 0; |
2490 | skb->h.th->check = | 2489 | skb->h.th->check = |
2491 | ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, | 2490 | ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, |
@@ -2540,7 +2539,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2540 | unsigned int i; | 2539 | unsigned int i; |
2541 | uint8_t css; | 2540 | uint8_t css; |
2542 | 2541 | ||
2543 | if(likely(skb->ip_summed == CHECKSUM_HW)) { | 2542 | if (likely(skb->ip_summed == CHECKSUM_HW)) { |
2544 | css = skb->h.raw - skb->data; | 2543 | css = skb->h.raw - skb->data; |
2545 | 2544 | ||
2546 | i = tx_ring->next_to_use; | 2545 | i = tx_ring->next_to_use; |
@@ -2580,7 +2579,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2580 | 2579 | ||
2581 | i = tx_ring->next_to_use; | 2580 | i = tx_ring->next_to_use; |
2582 | 2581 | ||
2583 | while(len) { | 2582 | while (len) { |
2584 | buffer_info = &tx_ring->buffer_info[i]; | 2583 | buffer_info = &tx_ring->buffer_info[i]; |
2585 | size = min(len, max_per_txd); | 2584 | size = min(len, max_per_txd); |
2586 | #ifdef NETIF_F_TSO | 2585 | #ifdef NETIF_F_TSO |
@@ -2596,7 +2595,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2596 | 2595 | ||
2597 | /* Workaround for premature desc write-backs | 2596 | /* Workaround for premature desc write-backs |
2598 | * in TSO mode. Append 4-byte sentinel desc */ | 2597 | * in TSO mode. Append 4-byte sentinel desc */ |
2599 | if(unlikely(mss && !nr_frags && size == len && size > 8)) | 2598 | if (unlikely(mss && !nr_frags && size == len && size > 8)) |
2600 | size -= 4; | 2599 | size -= 4; |
2601 | #endif | 2600 | #endif |
2602 | /* work-around for errata 10 and it applies | 2601 | /* work-around for errata 10 and it applies |
@@ -2604,13 +2603,13 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2604 | * The fix is to make sure that the first descriptor of a | 2603 | * The fix is to make sure that the first descriptor of a |
2605 | * packet is smaller than 2048 - 16 - 16 (or 2016) bytes | 2604 | * packet is smaller than 2048 - 16 - 16 (or 2016) bytes |
2606 | */ | 2605 | */ |
2607 | if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && | 2606 | if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && |
2608 | (size > 2015) && count == 0)) | 2607 | (size > 2015) && count == 0)) |
2609 | size = 2015; | 2608 | size = 2015; |
2610 | 2609 | ||
2611 | /* Workaround for potential 82544 hang in PCI-X. Avoid | 2610 | /* Workaround for potential 82544 hang in PCI-X. Avoid |
2612 | * terminating buffers within evenly-aligned dwords. */ | 2611 | * terminating buffers within evenly-aligned dwords. */ |
2613 | if(unlikely(adapter->pcix_82544 && | 2612 | if (unlikely(adapter->pcix_82544 && |
2614 | !((unsigned long)(skb->data + offset + size - 1) & 4) && | 2613 | !((unsigned long)(skb->data + offset + size - 1) & 4) && |
2615 | size > 4)) | 2614 | size > 4)) |
2616 | size -= 4; | 2615 | size -= 4; |
@@ -2626,29 +2625,29 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2626 | len -= size; | 2625 | len -= size; |
2627 | offset += size; | 2626 | offset += size; |
2628 | count++; | 2627 | count++; |
2629 | if(unlikely(++i == tx_ring->count)) i = 0; | 2628 | if (unlikely(++i == tx_ring->count)) i = 0; |
2630 | } | 2629 | } |
2631 | 2630 | ||
2632 | for(f = 0; f < nr_frags; f++) { | 2631 | for (f = 0; f < nr_frags; f++) { |
2633 | struct skb_frag_struct *frag; | 2632 | struct skb_frag_struct *frag; |
2634 | 2633 | ||
2635 | frag = &skb_shinfo(skb)->frags[f]; | 2634 | frag = &skb_shinfo(skb)->frags[f]; |
2636 | len = frag->size; | 2635 | len = frag->size; |
2637 | offset = frag->page_offset; | 2636 | offset = frag->page_offset; |
2638 | 2637 | ||
2639 | while(len) { | 2638 | while (len) { |
2640 | buffer_info = &tx_ring->buffer_info[i]; | 2639 | buffer_info = &tx_ring->buffer_info[i]; |
2641 | size = min(len, max_per_txd); | 2640 | size = min(len, max_per_txd); |
2642 | #ifdef NETIF_F_TSO | 2641 | #ifdef NETIF_F_TSO |
2643 | /* Workaround for premature desc write-backs | 2642 | /* Workaround for premature desc write-backs |
2644 | * in TSO mode. Append 4-byte sentinel desc */ | 2643 | * in TSO mode. Append 4-byte sentinel desc */ |
2645 | if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) | 2644 | if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) |
2646 | size -= 4; | 2645 | size -= 4; |
2647 | #endif | 2646 | #endif |
2648 | /* Workaround for potential 82544 hang in PCI-X. | 2647 | /* Workaround for potential 82544 hang in PCI-X. |
2649 | * Avoid terminating buffers within evenly-aligned | 2648 | * Avoid terminating buffers within evenly-aligned |
2650 | * dwords. */ | 2649 | * dwords. */ |
2651 | if(unlikely(adapter->pcix_82544 && | 2650 | if (unlikely(adapter->pcix_82544 && |
2652 | !((unsigned long)(frag->page+offset+size-1) & 4) && | 2651 | !((unsigned long)(frag->page+offset+size-1) & 4) && |
2653 | size > 4)) | 2652 | size > 4)) |
2654 | size -= 4; | 2653 | size -= 4; |
@@ -2665,7 +2664,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2665 | len -= size; | 2664 | len -= size; |
2666 | offset += size; | 2665 | offset += size; |
2667 | count++; | 2666 | count++; |
2668 | if(unlikely(++i == tx_ring->count)) i = 0; | 2667 | if (unlikely(++i == tx_ring->count)) i = 0; |
2669 | } | 2668 | } |
2670 | } | 2669 | } |
2671 | 2670 | ||
@@ -2685,35 +2684,35 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2685 | uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; | 2684 | uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; |
2686 | unsigned int i; | 2685 | unsigned int i; |
2687 | 2686 | ||
2688 | if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { | 2687 | if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { |
2689 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | | 2688 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | |
2690 | E1000_TXD_CMD_TSE; | 2689 | E1000_TXD_CMD_TSE; |
2691 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; | 2690 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; |
2692 | 2691 | ||
2693 | if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) | 2692 | if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) |
2694 | txd_upper |= E1000_TXD_POPTS_IXSM << 8; | 2693 | txd_upper |= E1000_TXD_POPTS_IXSM << 8; |
2695 | } | 2694 | } |
2696 | 2695 | ||
2697 | if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { | 2696 | if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { |
2698 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; | 2697 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; |
2699 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; | 2698 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; |
2700 | } | 2699 | } |
2701 | 2700 | ||
2702 | if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { | 2701 | if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { |
2703 | txd_lower |= E1000_TXD_CMD_VLE; | 2702 | txd_lower |= E1000_TXD_CMD_VLE; |
2704 | txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); | 2703 | txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); |
2705 | } | 2704 | } |
2706 | 2705 | ||
2707 | i = tx_ring->next_to_use; | 2706 | i = tx_ring->next_to_use; |
2708 | 2707 | ||
2709 | while(count--) { | 2708 | while (count--) { |
2710 | buffer_info = &tx_ring->buffer_info[i]; | 2709 | buffer_info = &tx_ring->buffer_info[i]; |
2711 | tx_desc = E1000_TX_DESC(*tx_ring, i); | 2710 | tx_desc = E1000_TX_DESC(*tx_ring, i); |
2712 | tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | 2711 | tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
2713 | tx_desc->lower.data = | 2712 | tx_desc->lower.data = |
2714 | cpu_to_le32(txd_lower | buffer_info->length); | 2713 | cpu_to_le32(txd_lower | buffer_info->length); |
2715 | tx_desc->upper.data = cpu_to_le32(txd_upper); | 2714 | tx_desc->upper.data = cpu_to_le32(txd_upper); |
2716 | if(unlikely(++i == tx_ring->count)) i = 0; | 2715 | if (unlikely(++i == tx_ring->count)) i = 0; |
2717 | } | 2716 | } |
2718 | 2717 | ||
2719 | tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); | 2718 | tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); |
@@ -2748,20 +2747,20 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) | |||
2748 | 2747 | ||
2749 | E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); | 2748 | E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); |
2750 | 2749 | ||
2751 | if(adapter->link_duplex != HALF_DUPLEX) | 2750 | if (adapter->link_duplex != HALF_DUPLEX) |
2752 | goto no_fifo_stall_required; | 2751 | goto no_fifo_stall_required; |
2753 | 2752 | ||
2754 | if(atomic_read(&adapter->tx_fifo_stall)) | 2753 | if (atomic_read(&adapter->tx_fifo_stall)) |
2755 | return 1; | 2754 | return 1; |
2756 | 2755 | ||
2757 | if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { | 2756 | if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { |
2758 | atomic_set(&adapter->tx_fifo_stall, 1); | 2757 | atomic_set(&adapter->tx_fifo_stall, 1); |
2759 | return 1; | 2758 | return 1; |
2760 | } | 2759 | } |
2761 | 2760 | ||
2762 | no_fifo_stall_required: | 2761 | no_fifo_stall_required: |
2763 | adapter->tx_fifo_head += skb_fifo_len; | 2762 | adapter->tx_fifo_head += skb_fifo_len; |
2764 | if(adapter->tx_fifo_head >= adapter->tx_fifo_size) | 2763 | if (adapter->tx_fifo_head >= adapter->tx_fifo_size) |
2765 | adapter->tx_fifo_head -= adapter->tx_fifo_size; | 2764 | adapter->tx_fifo_head -= adapter->tx_fifo_size; |
2766 | return 0; | 2765 | return 0; |
2767 | } | 2766 | } |
@@ -2772,27 +2771,27 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) | |||
2772 | { | 2771 | { |
2773 | struct e1000_hw *hw = &adapter->hw; | 2772 | struct e1000_hw *hw = &adapter->hw; |
2774 | uint16_t length, offset; | 2773 | uint16_t length, offset; |
2775 | if(vlan_tx_tag_present(skb)) { | 2774 | if (vlan_tx_tag_present(skb)) { |
2776 | if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && | 2775 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && |
2777 | ( adapter->hw.mng_cookie.status & | 2776 | ( adapter->hw.mng_cookie.status & |
2778 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) | 2777 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) |
2779 | return 0; | 2778 | return 0; |
2780 | } | 2779 | } |
2781 | if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { | 2780 | if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { |
2782 | struct ethhdr *eth = (struct ethhdr *) skb->data; | 2781 | struct ethhdr *eth = (struct ethhdr *) skb->data; |
2783 | if((htons(ETH_P_IP) == eth->h_proto)) { | 2782 | if ((htons(ETH_P_IP) == eth->h_proto)) { |
2784 | const struct iphdr *ip = | 2783 | const struct iphdr *ip = |
2785 | (struct iphdr *)((uint8_t *)skb->data+14); | 2784 | (struct iphdr *)((uint8_t *)skb->data+14); |
2786 | if(IPPROTO_UDP == ip->protocol) { | 2785 | if (IPPROTO_UDP == ip->protocol) { |
2787 | struct udphdr *udp = | 2786 | struct udphdr *udp = |
2788 | (struct udphdr *)((uint8_t *)ip + | 2787 | (struct udphdr *)((uint8_t *)ip + |
2789 | (ip->ihl << 2)); | 2788 | (ip->ihl << 2)); |
2790 | if(ntohs(udp->dest) == 67) { | 2789 | if (ntohs(udp->dest) == 67) { |
2791 | offset = (uint8_t *)udp + 8 - skb->data; | 2790 | offset = (uint8_t *)udp + 8 - skb->data; |
2792 | length = skb->len - offset; | 2791 | length = skb->len - offset; |
2793 | 2792 | ||
2794 | return e1000_mng_write_dhcp_info(hw, | 2793 | return e1000_mng_write_dhcp_info(hw, |
2795 | (uint8_t *)udp + 8, | 2794 | (uint8_t *)udp + 8, |
2796 | length); | 2795 | length); |
2797 | } | 2796 | } |
2798 | } | 2797 | } |
@@ -2815,7 +2814,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2815 | unsigned int nr_frags = 0; | 2814 | unsigned int nr_frags = 0; |
2816 | unsigned int mss = 0; | 2815 | unsigned int mss = 0; |
2817 | int count = 0; | 2816 | int count = 0; |
2818 | int tso; | 2817 | int tso; |
2819 | unsigned int f; | 2818 | unsigned int f; |
2820 | len -= skb->data_len; | 2819 | len -= skb->data_len; |
2821 | 2820 | ||
@@ -2838,7 +2837,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2838 | * 4 = ceil(buffer len/mss). To make sure we don't | 2837 | * 4 = ceil(buffer len/mss). To make sure we don't |
2839 | * overrun the FIFO, adjust the max buffer len if mss | 2838 | * overrun the FIFO, adjust the max buffer len if mss |
2840 | * drops. */ | 2839 | * drops. */ |
2841 | if(mss) { | 2840 | if (mss) { |
2842 | uint8_t hdr_len; | 2841 | uint8_t hdr_len; |
2843 | max_per_txd = min(mss << 2, max_per_txd); | 2842 | max_per_txd = min(mss << 2, max_per_txd); |
2844 | max_txd_pwr = fls(max_per_txd) - 1; | 2843 | max_txd_pwr = fls(max_per_txd) - 1; |
@@ -2861,12 +2860,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2861 | } | 2860 | } |
2862 | } | 2861 | } |
2863 | 2862 | ||
2864 | if((mss) || (skb->ip_summed == CHECKSUM_HW)) | ||
2865 | /* reserve a descriptor for the offload context */ | 2863 | /* reserve a descriptor for the offload context */ |
2864 | if ((mss) || (skb->ip_summed == CHECKSUM_HW)) | ||
2866 | count++; | 2865 | count++; |
2867 | count++; | 2866 | count++; |
2868 | #else | 2867 | #else |
2869 | if(skb->ip_summed == CHECKSUM_HW) | 2868 | if (skb->ip_summed == CHECKSUM_HW) |
2870 | count++; | 2869 | count++; |
2871 | #endif | 2870 | #endif |
2872 | 2871 | ||
@@ -2879,24 +2878,24 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2879 | 2878 | ||
2880 | count += TXD_USE_COUNT(len, max_txd_pwr); | 2879 | count += TXD_USE_COUNT(len, max_txd_pwr); |
2881 | 2880 | ||
2882 | if(adapter->pcix_82544) | 2881 | if (adapter->pcix_82544) |
2883 | count++; | 2882 | count++; |
2884 | 2883 | ||
2885 | /* work-around for errata 10 and it applies to all controllers | 2884 | /* work-around for errata 10 and it applies to all controllers |
2886 | * in PCI-X mode, so add one more descriptor to the count | 2885 | * in PCI-X mode, so add one more descriptor to the count |
2887 | */ | 2886 | */ |
2888 | if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && | 2887 | if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && |
2889 | (len > 2015))) | 2888 | (len > 2015))) |
2890 | count++; | 2889 | count++; |
2891 | 2890 | ||
2892 | nr_frags = skb_shinfo(skb)->nr_frags; | 2891 | nr_frags = skb_shinfo(skb)->nr_frags; |
2893 | for(f = 0; f < nr_frags; f++) | 2892 | for (f = 0; f < nr_frags; f++) |
2894 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, | 2893 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, |
2895 | max_txd_pwr); | 2894 | max_txd_pwr); |
2896 | if(adapter->pcix_82544) | 2895 | if (adapter->pcix_82544) |
2897 | count += nr_frags; | 2896 | count += nr_frags; |
2898 | 2897 | ||
2899 | if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) | 2898 | if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) |
2900 | e1000_transfer_dhcp_info(adapter, skb); | 2899 | e1000_transfer_dhcp_info(adapter, skb); |
2901 | 2900 | ||
2902 | local_irq_save(flags); | 2901 | local_irq_save(flags); |
@@ -2914,8 +2913,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2914 | return NETDEV_TX_BUSY; | 2913 | return NETDEV_TX_BUSY; |
2915 | } | 2914 | } |
2916 | 2915 | ||
2917 | if(unlikely(adapter->hw.mac_type == e1000_82547)) { | 2916 | if (unlikely(adapter->hw.mac_type == e1000_82547)) { |
2918 | if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { | 2917 | if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { |
2919 | netif_stop_queue(netdev); | 2918 | netif_stop_queue(netdev); |
2920 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies); | 2919 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies); |
2921 | spin_unlock_irqrestore(&tx_ring->tx_lock, flags); | 2920 | spin_unlock_irqrestore(&tx_ring->tx_lock, flags); |
@@ -2923,13 +2922,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2923 | } | 2922 | } |
2924 | } | 2923 | } |
2925 | 2924 | ||
2926 | if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { | 2925 | if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { |
2927 | tx_flags |= E1000_TX_FLAGS_VLAN; | 2926 | tx_flags |= E1000_TX_FLAGS_VLAN; |
2928 | tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); | 2927 | tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); |
2929 | } | 2928 | } |
2930 | 2929 | ||
2931 | first = tx_ring->next_to_use; | 2930 | first = tx_ring->next_to_use; |
2932 | 2931 | ||
2933 | tso = e1000_tso(adapter, tx_ring, skb); | 2932 | tso = e1000_tso(adapter, tx_ring, skb); |
2934 | if (tso < 0) { | 2933 | if (tso < 0) { |
2935 | dev_kfree_skb_any(skb); | 2934 | dev_kfree_skb_any(skb); |
@@ -3018,9 +3017,9 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3018 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3017 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3019 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | 3018 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
3020 | 3019 | ||
3021 | if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | 3020 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || |
3022 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | 3021 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
3023 | DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); | 3022 | DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); |
3024 | return -EINVAL; | 3023 | return -EINVAL; |
3025 | } | 3024 | } |
3026 | 3025 | ||
@@ -3068,7 +3067,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3068 | 3067 | ||
3069 | netdev->mtu = new_mtu; | 3068 | netdev->mtu = new_mtu; |
3070 | 3069 | ||
3071 | if(netif_running(netdev)) { | 3070 | if (netif_running(netdev)) { |
3072 | e1000_down(adapter); | 3071 | e1000_down(adapter); |
3073 | e1000_up(adapter); | 3072 | e1000_up(adapter); |
3074 | } | 3073 | } |
@@ -3155,7 +3154,7 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3155 | hw->collision_delta = E1000_READ_REG(hw, COLC); | 3154 | hw->collision_delta = E1000_READ_REG(hw, COLC); |
3156 | adapter->stats.colc += hw->collision_delta; | 3155 | adapter->stats.colc += hw->collision_delta; |
3157 | 3156 | ||
3158 | if(hw->mac_type >= e1000_82543) { | 3157 | if (hw->mac_type >= e1000_82543) { |
3159 | adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); | 3158 | adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); |
3160 | adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); | 3159 | adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); |
3161 | adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); | 3160 | adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); |
@@ -3163,7 +3162,7 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3163 | adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); | 3162 | adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); |
3164 | adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); | 3163 | adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); |
3165 | } | 3164 | } |
3166 | if(hw->mac_type > e1000_82547_rev_2) { | 3165 | if (hw->mac_type > e1000_82547_rev_2) { |
3167 | adapter->stats.iac += E1000_READ_REG(hw, IAC); | 3166 | adapter->stats.iac += E1000_READ_REG(hw, IAC); |
3168 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); | 3167 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); |
3169 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); | 3168 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); |
@@ -3207,14 +3206,14 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3207 | 3206 | ||
3208 | /* Phy Stats */ | 3207 | /* Phy Stats */ |
3209 | 3208 | ||
3210 | if(hw->media_type == e1000_media_type_copper) { | 3209 | if (hw->media_type == e1000_media_type_copper) { |
3211 | if((adapter->link_speed == SPEED_1000) && | 3210 | if ((adapter->link_speed == SPEED_1000) && |
3212 | (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { | 3211 | (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { |
3213 | phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; | 3212 | phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; |
3214 | adapter->phy_stats.idle_errors += phy_tmp; | 3213 | adapter->phy_stats.idle_errors += phy_tmp; |
3215 | } | 3214 | } |
3216 | 3215 | ||
3217 | if((hw->mac_type <= e1000_82546) && | 3216 | if ((hw->mac_type <= e1000_82546) && |
3218 | (hw->phy_type == e1000_phy_m88) && | 3217 | (hw->phy_type == e1000_phy_m88) && |
3219 | !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) | 3218 | !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) |
3220 | adapter->phy_stats.receive_errors += phy_tmp; | 3219 | adapter->phy_stats.receive_errors += phy_tmp; |
@@ -3279,7 +3278,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3279 | return IRQ_NONE; /* Not our interrupt */ | 3278 | return IRQ_NONE; /* Not our interrupt */ |
3280 | } | 3279 | } |
3281 | 3280 | ||
3282 | if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { | 3281 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { |
3283 | hw->get_link_status = 1; | 3282 | hw->get_link_status = 1; |
3284 | mod_timer(&adapter->watchdog_timer, jiffies); | 3283 | mod_timer(&adapter->watchdog_timer, jiffies); |
3285 | } | 3284 | } |
@@ -3311,26 +3310,26 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3311 | 3310 | ||
3312 | #else /* if !CONFIG_E1000_NAPI */ | 3311 | #else /* if !CONFIG_E1000_NAPI */ |
3313 | /* Writing IMC and IMS is needed for 82547. | 3312 | /* Writing IMC and IMS is needed for 82547. |
3314 | Due to Hub Link bus being occupied, an interrupt | 3313 | * Due to Hub Link bus being occupied, an interrupt |
3315 | de-assertion message is not able to be sent. | 3314 | * de-assertion message is not able to be sent. |
3316 | When an interrupt assertion message is generated later, | 3315 | * When an interrupt assertion message is generated later, |
3317 | two messages are re-ordered and sent out. | 3316 | * two messages are re-ordered and sent out. |
3318 | That causes APIC to think 82547 is in de-assertion | 3317 | * That causes APIC to think 82547 is in de-assertion |
3319 | state, while 82547 is in assertion state, resulting | 3318 | * state, while 82547 is in assertion state, resulting |
3320 | in dead lock. Writing IMC forces 82547 into | 3319 | * in dead lock. Writing IMC forces 82547 into |
3321 | de-assertion state. | 3320 | * de-assertion state. |
3322 | */ | 3321 | */ |
3323 | if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){ | 3322 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) { |
3324 | atomic_inc(&adapter->irq_sem); | 3323 | atomic_inc(&adapter->irq_sem); |
3325 | E1000_WRITE_REG(hw, IMC, ~0); | 3324 | E1000_WRITE_REG(hw, IMC, ~0); |
3326 | } | 3325 | } |
3327 | 3326 | ||
3328 | for(i = 0; i < E1000_MAX_INTR; i++) | 3327 | for (i = 0; i < E1000_MAX_INTR; i++) |
3329 | if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & | 3328 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & |
3330 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) | 3329 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) |
3331 | break; | 3330 | break; |
3332 | 3331 | ||
3333 | if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) | 3332 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) |
3334 | e1000_irq_enable(adapter); | 3333 | e1000_irq_enable(adapter); |
3335 | 3334 | ||
3336 | #endif /* CONFIG_E1000_NAPI */ | 3335 | #endif /* CONFIG_E1000_NAPI */ |
@@ -3382,9 +3381,9 @@ e1000_clean(struct net_device *poll_dev, int *budget) | |||
3382 | 3381 | ||
3383 | *budget -= work_done; | 3382 | *budget -= work_done; |
3384 | poll_dev->quota -= work_done; | 3383 | poll_dev->quota -= work_done; |
3385 | 3384 | ||
3386 | /* If no Tx and not enough Rx work done, exit the polling mode */ | 3385 | /* If no Tx and not enough Rx work done, exit the polling mode */ |
3387 | if((!tx_cleaned && (work_done == 0)) || | 3386 | if ((!tx_cleaned && (work_done == 0)) || |
3388 | !netif_running(adapter->netdev)) { | 3387 | !netif_running(adapter->netdev)) { |
3389 | quit_polling: | 3388 | quit_polling: |
3390 | netif_rx_complete(poll_dev); | 3389 | netif_rx_complete(poll_dev); |
@@ -3416,7 +3415,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3416 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 3415 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
3417 | 3416 | ||
3418 | while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { | 3417 | while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { |
3419 | for(cleaned = FALSE; !cleaned; ) { | 3418 | for (cleaned = FALSE; !cleaned; ) { |
3420 | tx_desc = E1000_TX_DESC(*tx_ring, i); | 3419 | tx_desc = E1000_TX_DESC(*tx_ring, i); |
3421 | buffer_info = &tx_ring->buffer_info[i]; | 3420 | buffer_info = &tx_ring->buffer_info[i]; |
3422 | cleaned = (i == eop); | 3421 | cleaned = (i == eop); |
@@ -3427,7 +3426,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3427 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 3426 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
3428 | memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); | 3427 | memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); |
3429 | 3428 | ||
3430 | if(unlikely(++i == tx_ring->count)) i = 0; | 3429 | if (unlikely(++i == tx_ring->count)) i = 0; |
3431 | } | 3430 | } |
3432 | 3431 | ||
3433 | #ifdef CONFIG_E1000_MQ | 3432 | #ifdef CONFIG_E1000_MQ |
@@ -3442,7 +3441,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3442 | 3441 | ||
3443 | spin_lock(&tx_ring->tx_lock); | 3442 | spin_lock(&tx_ring->tx_lock); |
3444 | 3443 | ||
3445 | if(unlikely(cleaned && netif_queue_stopped(netdev) && | 3444 | if (unlikely(cleaned && netif_queue_stopped(netdev) && |
3446 | netif_carrier_ok(netdev))) | 3445 | netif_carrier_ok(netdev))) |
3447 | netif_wake_queue(netdev); | 3446 | netif_wake_queue(netdev); |
3448 | 3447 | ||
@@ -3504,21 +3503,21 @@ e1000_rx_checksum(struct e1000_adapter *adapter, | |||
3504 | skb->ip_summed = CHECKSUM_NONE; | 3503 | skb->ip_summed = CHECKSUM_NONE; |
3505 | 3504 | ||
3506 | /* 82543 or newer only */ | 3505 | /* 82543 or newer only */ |
3507 | if(unlikely(adapter->hw.mac_type < e1000_82543)) return; | 3506 | if (unlikely(adapter->hw.mac_type < e1000_82543)) return; |
3508 | /* Ignore Checksum bit is set */ | 3507 | /* Ignore Checksum bit is set */ |
3509 | if(unlikely(status & E1000_RXD_STAT_IXSM)) return; | 3508 | if (unlikely(status & E1000_RXD_STAT_IXSM)) return; |
3510 | /* TCP/UDP checksum error bit is set */ | 3509 | /* TCP/UDP checksum error bit is set */ |
3511 | if(unlikely(errors & E1000_RXD_ERR_TCPE)) { | 3510 | if (unlikely(errors & E1000_RXD_ERR_TCPE)) { |
3512 | /* let the stack verify checksum errors */ | 3511 | /* let the stack verify checksum errors */ |
3513 | adapter->hw_csum_err++; | 3512 | adapter->hw_csum_err++; |
3514 | return; | 3513 | return; |
3515 | } | 3514 | } |
3516 | /* TCP/UDP Checksum has not been calculated */ | 3515 | /* TCP/UDP Checksum has not been calculated */ |
3517 | if(adapter->hw.mac_type <= e1000_82547_rev_2) { | 3516 | if (adapter->hw.mac_type <= e1000_82547_rev_2) { |
3518 | if(!(status & E1000_RXD_STAT_TCPCS)) | 3517 | if (!(status & E1000_RXD_STAT_TCPCS)) |
3519 | return; | 3518 | return; |
3520 | } else { | 3519 | } else { |
3521 | if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) | 3520 | if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) |
3522 | return; | 3521 | return; |
3523 | } | 3522 | } |
3524 | /* It must be a TCP or UDP packet with a valid checksum */ | 3523 | /* It must be a TCP or UDP packet with a valid checksum */ |
@@ -3571,7 +3570,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3571 | struct sk_buff *skb; | 3570 | struct sk_buff *skb; |
3572 | u8 status; | 3571 | u8 status; |
3573 | #ifdef CONFIG_E1000_NAPI | 3572 | #ifdef CONFIG_E1000_NAPI |
3574 | if(*work_done >= work_to_do) | 3573 | if (*work_done >= work_to_do) |
3575 | break; | 3574 | break; |
3576 | (*work_done)++; | 3575 | (*work_done)++; |
3577 | #endif | 3576 | #endif |
@@ -3625,7 +3624,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3625 | } | 3624 | } |
3626 | } | 3625 | } |
3627 | 3626 | ||
3628 | if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { | 3627 | if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { |
3629 | last_byte = *(skb->data + length - 1); | 3628 | last_byte = *(skb->data + length - 1); |
3630 | if (TBI_ACCEPT(&adapter->hw, status, | 3629 | if (TBI_ACCEPT(&adapter->hw, status, |
3631 | rx_desc->errors, length, last_byte)) { | 3630 | rx_desc->errors, length, last_byte)) { |
@@ -3672,9 +3671,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3672 | (uint32_t)(status) | | 3671 | (uint32_t)(status) | |
3673 | ((uint32_t)(rx_desc->errors) << 24), | 3672 | ((uint32_t)(rx_desc->errors) << 24), |
3674 | rx_desc->csum, skb); | 3673 | rx_desc->csum, skb); |
3674 | |||
3675 | skb->protocol = eth_type_trans(skb, netdev); | 3675 | skb->protocol = eth_type_trans(skb, netdev); |
3676 | #ifdef CONFIG_E1000_NAPI | 3676 | #ifdef CONFIG_E1000_NAPI |
3677 | if(unlikely(adapter->vlgrp && | 3677 | if (unlikely(adapter->vlgrp && |
3678 | (status & E1000_RXD_STAT_VP))) { | 3678 | (status & E1000_RXD_STAT_VP))) { |
3679 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | 3679 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, |
3680 | le16_to_cpu(rx_desc->special) & | 3680 | le16_to_cpu(rx_desc->special) & |
@@ -3683,7 +3683,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3683 | netif_receive_skb(skb); | 3683 | netif_receive_skb(skb); |
3684 | } | 3684 | } |
3685 | #else /* CONFIG_E1000_NAPI */ | 3685 | #else /* CONFIG_E1000_NAPI */ |
3686 | if(unlikely(adapter->vlgrp && | 3686 | if (unlikely(adapter->vlgrp && |
3687 | (status & E1000_RXD_STAT_VP))) { | 3687 | (status & E1000_RXD_STAT_VP))) { |
3688 | vlan_hwaccel_rx(skb, adapter->vlgrp, | 3688 | vlan_hwaccel_rx(skb, adapter->vlgrp, |
3689 | le16_to_cpu(rx_desc->special) & | 3689 | le16_to_cpu(rx_desc->special) & |
@@ -3748,12 +3748,12 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3748 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 3748 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
3749 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); | 3749 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); |
3750 | 3750 | ||
3751 | while(staterr & E1000_RXD_STAT_DD) { | 3751 | while (staterr & E1000_RXD_STAT_DD) { |
3752 | buffer_info = &rx_ring->buffer_info[i]; | 3752 | buffer_info = &rx_ring->buffer_info[i]; |
3753 | ps_page = &rx_ring->ps_page[i]; | 3753 | ps_page = &rx_ring->ps_page[i]; |
3754 | ps_page_dma = &rx_ring->ps_page_dma[i]; | 3754 | ps_page_dma = &rx_ring->ps_page_dma[i]; |
3755 | #ifdef CONFIG_E1000_NAPI | 3755 | #ifdef CONFIG_E1000_NAPI |
3756 | if(unlikely(*work_done >= work_to_do)) | 3756 | if (unlikely(*work_done >= work_to_do)) |
3757 | break; | 3757 | break; |
3758 | (*work_done)++; | 3758 | (*work_done)++; |
3759 | #endif | 3759 | #endif |
@@ -3765,21 +3765,21 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3765 | 3765 | ||
3766 | skb = buffer_info->skb; | 3766 | skb = buffer_info->skb; |
3767 | 3767 | ||
3768 | if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) { | 3768 | if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) { |
3769 | E1000_DBG("%s: Packet Split buffers didn't pick up" | 3769 | E1000_DBG("%s: Packet Split buffers didn't pick up" |
3770 | " the full packet\n", netdev->name); | 3770 | " the full packet\n", netdev->name); |
3771 | dev_kfree_skb_irq(skb); | 3771 | dev_kfree_skb_irq(skb); |
3772 | goto next_desc; | 3772 | goto next_desc; |
3773 | } | 3773 | } |
3774 | 3774 | ||
3775 | if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { | 3775 | if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { |
3776 | dev_kfree_skb_irq(skb); | 3776 | dev_kfree_skb_irq(skb); |
3777 | goto next_desc; | 3777 | goto next_desc; |
3778 | } | 3778 | } |
3779 | 3779 | ||
3780 | length = le16_to_cpu(rx_desc->wb.middle.length0); | 3780 | length = le16_to_cpu(rx_desc->wb.middle.length0); |
3781 | 3781 | ||
3782 | if(unlikely(!length)) { | 3782 | if (unlikely(!length)) { |
3783 | E1000_DBG("%s: Last part of the packet spanning" | 3783 | E1000_DBG("%s: Last part of the packet spanning" |
3784 | " multiple descriptors\n", netdev->name); | 3784 | " multiple descriptors\n", netdev->name); |
3785 | dev_kfree_skb_irq(skb); | 3785 | dev_kfree_skb_irq(skb); |
@@ -3789,8 +3789,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3789 | /* Good Receive */ | 3789 | /* Good Receive */ |
3790 | skb_put(skb, length); | 3790 | skb_put(skb, length); |
3791 | 3791 | ||
3792 | for(j = 0; j < adapter->rx_ps_pages; j++) { | 3792 | for (j = 0; j < adapter->rx_ps_pages; j++) { |
3793 | if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) | 3793 | if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) |
3794 | break; | 3794 | break; |
3795 | 3795 | ||
3796 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], | 3796 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], |
@@ -3810,11 +3810,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3810 | rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); | 3810 | rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); |
3811 | skb->protocol = eth_type_trans(skb, netdev); | 3811 | skb->protocol = eth_type_trans(skb, netdev); |
3812 | 3812 | ||
3813 | if(likely(rx_desc->wb.upper.header_status & | 3813 | if (likely(rx_desc->wb.upper.header_status & |
3814 | E1000_RXDPS_HDRSTAT_HDRSP)) | 3814 | E1000_RXDPS_HDRSTAT_HDRSP)) |
3815 | adapter->rx_hdr_split++; | 3815 | adapter->rx_hdr_split++; |
3816 | #ifdef CONFIG_E1000_NAPI | 3816 | #ifdef CONFIG_E1000_NAPI |
3817 | if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { | 3817 | if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { |
3818 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | 3818 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, |
3819 | le16_to_cpu(rx_desc->wb.middle.vlan) & | 3819 | le16_to_cpu(rx_desc->wb.middle.vlan) & |
3820 | E1000_RXD_SPC_VLAN_MASK); | 3820 | E1000_RXD_SPC_VLAN_MASK); |
@@ -3822,7 +3822,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3822 | netif_receive_skb(skb); | 3822 | netif_receive_skb(skb); |
3823 | } | 3823 | } |
3824 | #else /* CONFIG_E1000_NAPI */ | 3824 | #else /* CONFIG_E1000_NAPI */ |
3825 | if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { | 3825 | if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { |
3826 | vlan_hwaccel_rx(skb, adapter->vlgrp, | 3826 | vlan_hwaccel_rx(skb, adapter->vlgrp, |
3827 | le16_to_cpu(rx_desc->wb.middle.vlan) & | 3827 | le16_to_cpu(rx_desc->wb.middle.vlan) & |
3828 | E1000_RXD_SPC_VLAN_MASK); | 3828 | E1000_RXD_SPC_VLAN_MASK); |
@@ -3887,7 +3887,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
3887 | } | 3887 | } |
3888 | 3888 | ||
3889 | 3889 | ||
3890 | if(unlikely(!skb)) { | 3890 | if (unlikely(!skb)) { |
3891 | /* Better luck next round */ | 3891 | /* Better luck next round */ |
3892 | adapter->alloc_rx_buff_failed++; | 3892 | adapter->alloc_rx_buff_failed++; |
3893 | break; | 3893 | break; |
@@ -3952,7 +3952,8 @@ map_skb: | |||
3952 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 3952 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
3953 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | 3953 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
3954 | 3954 | ||
3955 | if(unlikely(++i == rx_ring->count)) i = 0; | 3955 | if (unlikely(++i == rx_ring->count)) |
3956 | i = 0; | ||
3956 | buffer_info = &rx_ring->buffer_info[i]; | 3957 | buffer_info = &rx_ring->buffer_info[i]; |
3957 | } | 3958 | } |
3958 | 3959 | ||
@@ -3997,7 +3998,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
3997 | while (cleaned_count--) { | 3998 | while (cleaned_count--) { |
3998 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 3999 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
3999 | 4000 | ||
4000 | for(j = 0; j < PS_PAGE_BUFFERS; j++) { | 4001 | for (j = 0; j < PS_PAGE_BUFFERS; j++) { |
4001 | if (j < adapter->rx_ps_pages) { | 4002 | if (j < adapter->rx_ps_pages) { |
4002 | if (likely(!ps_page->ps_page[j])) { | 4003 | if (likely(!ps_page->ps_page[j])) { |
4003 | ps_page->ps_page[j] = | 4004 | ps_page->ps_page[j] = |
@@ -4013,7 +4014,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
4013 | PCI_DMA_FROMDEVICE); | 4014 | PCI_DMA_FROMDEVICE); |
4014 | } | 4015 | } |
4015 | /* Refresh the desc even if buffer_addrs didn't | 4016 | /* Refresh the desc even if buffer_addrs didn't |
4016 | * change because each write-back erases | 4017 | * change because each write-back erases |
4017 | * this info. | 4018 | * this info. |
4018 | */ | 4019 | */ |
4019 | rx_desc->read.buffer_addr[j+1] = | 4020 | rx_desc->read.buffer_addr[j+1] = |
@@ -4045,7 +4046,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
4045 | 4046 | ||
4046 | rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); | 4047 | rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); |
4047 | 4048 | ||
4048 | if(unlikely(++i == rx_ring->count)) i = 0; | 4049 | if (unlikely(++i == rx_ring->count)) i = 0; |
4049 | buffer_info = &rx_ring->buffer_info[i]; | 4050 | buffer_info = &rx_ring->buffer_info[i]; |
4050 | ps_page = &rx_ring->ps_page[i]; | 4051 | ps_page = &rx_ring->ps_page[i]; |
4051 | ps_page_dma = &rx_ring->ps_page_dma[i]; | 4052 | ps_page_dma = &rx_ring->ps_page_dma[i]; |
@@ -4080,24 +4081,24 @@ e1000_smartspeed(struct e1000_adapter *adapter) | |||
4080 | uint16_t phy_status; | 4081 | uint16_t phy_status; |
4081 | uint16_t phy_ctrl; | 4082 | uint16_t phy_ctrl; |
4082 | 4083 | ||
4083 | if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || | 4084 | if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || |
4084 | !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) | 4085 | !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) |
4085 | return; | 4086 | return; |
4086 | 4087 | ||
4087 | if(adapter->smartspeed == 0) { | 4088 | if (adapter->smartspeed == 0) { |
4088 | /* If Master/Slave config fault is asserted twice, | 4089 | /* If Master/Slave config fault is asserted twice, |
4089 | * we assume back-to-back */ | 4090 | * we assume back-to-back */ |
4090 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); | 4091 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); |
4091 | if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; | 4092 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; |
4092 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); | 4093 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); |
4093 | if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; | 4094 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; |
4094 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); | 4095 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); |
4095 | if(phy_ctrl & CR_1000T_MS_ENABLE) { | 4096 | if (phy_ctrl & CR_1000T_MS_ENABLE) { |
4096 | phy_ctrl &= ~CR_1000T_MS_ENABLE; | 4097 | phy_ctrl &= ~CR_1000T_MS_ENABLE; |
4097 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, | 4098 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, |
4098 | phy_ctrl); | 4099 | phy_ctrl); |
4099 | adapter->smartspeed++; | 4100 | adapter->smartspeed++; |
4100 | if(!e1000_phy_setup_autoneg(&adapter->hw) && | 4101 | if (!e1000_phy_setup_autoneg(&adapter->hw) && |
4101 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, | 4102 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, |
4102 | &phy_ctrl)) { | 4103 | &phy_ctrl)) { |
4103 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | | 4104 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | |
@@ -4107,12 +4108,12 @@ e1000_smartspeed(struct e1000_adapter *adapter) | |||
4107 | } | 4108 | } |
4108 | } | 4109 | } |
4109 | return; | 4110 | return; |
4110 | } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { | 4111 | } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { |
4111 | /* If still no link, perhaps using 2/3 pair cable */ | 4112 | /* If still no link, perhaps using 2/3 pair cable */ |
4112 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); | 4113 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); |
4113 | phy_ctrl |= CR_1000T_MS_ENABLE; | 4114 | phy_ctrl |= CR_1000T_MS_ENABLE; |
4114 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); | 4115 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); |
4115 | if(!e1000_phy_setup_autoneg(&adapter->hw) && | 4116 | if (!e1000_phy_setup_autoneg(&adapter->hw) && |
4116 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { | 4117 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { |
4117 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | | 4118 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | |
4118 | MII_CR_RESTART_AUTO_NEG); | 4119 | MII_CR_RESTART_AUTO_NEG); |
@@ -4120,7 +4121,7 @@ e1000_smartspeed(struct e1000_adapter *adapter) | |||
4120 | } | 4121 | } |
4121 | } | 4122 | } |
4122 | /* Restart process after E1000_SMARTSPEED_MAX iterations */ | 4123 | /* Restart process after E1000_SMARTSPEED_MAX iterations */ |
4123 | if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX) | 4124 | if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) |
4124 | adapter->smartspeed = 0; | 4125 | adapter->smartspeed = 0; |
4125 | } | 4126 | } |
4126 | 4127 | ||
@@ -4161,7 +4162,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4161 | uint16_t spddplx; | 4162 | uint16_t spddplx; |
4162 | unsigned long flags; | 4163 | unsigned long flags; |
4163 | 4164 | ||
4164 | if(adapter->hw.media_type != e1000_media_type_copper) | 4165 | if (adapter->hw.media_type != e1000_media_type_copper) |
4165 | return -EOPNOTSUPP; | 4166 | return -EOPNOTSUPP; |
4166 | 4167 | ||
4167 | switch (cmd) { | 4168 | switch (cmd) { |
@@ -4169,10 +4170,10 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4169 | data->phy_id = adapter->hw.phy_addr; | 4170 | data->phy_id = adapter->hw.phy_addr; |
4170 | break; | 4171 | break; |
4171 | case SIOCGMIIREG: | 4172 | case SIOCGMIIREG: |
4172 | if(!capable(CAP_NET_ADMIN)) | 4173 | if (!capable(CAP_NET_ADMIN)) |
4173 | return -EPERM; | 4174 | return -EPERM; |
4174 | spin_lock_irqsave(&adapter->stats_lock, flags); | 4175 | spin_lock_irqsave(&adapter->stats_lock, flags); |
4175 | if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, | 4176 | if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, |
4176 | &data->val_out)) { | 4177 | &data->val_out)) { |
4177 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4178 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4178 | return -EIO; | 4179 | return -EIO; |
@@ -4180,23 +4181,23 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4180 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4181 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4181 | break; | 4182 | break; |
4182 | case SIOCSMIIREG: | 4183 | case SIOCSMIIREG: |
4183 | if(!capable(CAP_NET_ADMIN)) | 4184 | if (!capable(CAP_NET_ADMIN)) |
4184 | return -EPERM; | 4185 | return -EPERM; |
4185 | if(data->reg_num & ~(0x1F)) | 4186 | if (data->reg_num & ~(0x1F)) |
4186 | return -EFAULT; | 4187 | return -EFAULT; |
4187 | mii_reg = data->val_in; | 4188 | mii_reg = data->val_in; |
4188 | spin_lock_irqsave(&adapter->stats_lock, flags); | 4189 | spin_lock_irqsave(&adapter->stats_lock, flags); |
4189 | if(e1000_write_phy_reg(&adapter->hw, data->reg_num, | 4190 | if (e1000_write_phy_reg(&adapter->hw, data->reg_num, |
4190 | mii_reg)) { | 4191 | mii_reg)) { |
4191 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4192 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4192 | return -EIO; | 4193 | return -EIO; |
4193 | } | 4194 | } |
4194 | if(adapter->hw.phy_type == e1000_phy_m88) { | 4195 | if (adapter->hw.phy_type == e1000_phy_m88) { |
4195 | switch (data->reg_num) { | 4196 | switch (data->reg_num) { |
4196 | case PHY_CTRL: | 4197 | case PHY_CTRL: |
4197 | if(mii_reg & MII_CR_POWER_DOWN) | 4198 | if (mii_reg & MII_CR_POWER_DOWN) |
4198 | break; | 4199 | break; |
4199 | if(mii_reg & MII_CR_AUTO_NEG_EN) { | 4200 | if (mii_reg & MII_CR_AUTO_NEG_EN) { |
4200 | adapter->hw.autoneg = 1; | 4201 | adapter->hw.autoneg = 1; |
4201 | adapter->hw.autoneg_advertised = 0x2F; | 4202 | adapter->hw.autoneg_advertised = 0x2F; |
4202 | } else { | 4203 | } else { |
@@ -4211,14 +4212,14 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4211 | HALF_DUPLEX; | 4212 | HALF_DUPLEX; |
4212 | retval = e1000_set_spd_dplx(adapter, | 4213 | retval = e1000_set_spd_dplx(adapter, |
4213 | spddplx); | 4214 | spddplx); |
4214 | if(retval) { | 4215 | if (retval) { |
4215 | spin_unlock_irqrestore( | 4216 | spin_unlock_irqrestore( |
4216 | &adapter->stats_lock, | 4217 | &adapter->stats_lock, |
4217 | flags); | 4218 | flags); |
4218 | return retval; | 4219 | return retval; |
4219 | } | 4220 | } |
4220 | } | 4221 | } |
4221 | if(netif_running(adapter->netdev)) { | 4222 | if (netif_running(adapter->netdev)) { |
4222 | e1000_down(adapter); | 4223 | e1000_down(adapter); |
4223 | e1000_up(adapter); | 4224 | e1000_up(adapter); |
4224 | } else | 4225 | } else |
@@ -4226,7 +4227,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4226 | break; | 4227 | break; |
4227 | case M88E1000_PHY_SPEC_CTRL: | 4228 | case M88E1000_PHY_SPEC_CTRL: |
4228 | case M88E1000_EXT_PHY_SPEC_CTRL: | 4229 | case M88E1000_EXT_PHY_SPEC_CTRL: |
4229 | if(e1000_phy_reset(&adapter->hw)) { | 4230 | if (e1000_phy_reset(&adapter->hw)) { |
4230 | spin_unlock_irqrestore( | 4231 | spin_unlock_irqrestore( |
4231 | &adapter->stats_lock, flags); | 4232 | &adapter->stats_lock, flags); |
4232 | return -EIO; | 4233 | return -EIO; |
@@ -4236,9 +4237,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4236 | } else { | 4237 | } else { |
4237 | switch (data->reg_num) { | 4238 | switch (data->reg_num) { |
4238 | case PHY_CTRL: | 4239 | case PHY_CTRL: |
4239 | if(mii_reg & MII_CR_POWER_DOWN) | 4240 | if (mii_reg & MII_CR_POWER_DOWN) |
4240 | break; | 4241 | break; |
4241 | if(netif_running(adapter->netdev)) { | 4242 | if (netif_running(adapter->netdev)) { |
4242 | e1000_down(adapter); | 4243 | e1000_down(adapter); |
4243 | e1000_up(adapter); | 4244 | e1000_up(adapter); |
4244 | } else | 4245 | } else |
@@ -4260,7 +4261,7 @@ e1000_pci_set_mwi(struct e1000_hw *hw) | |||
4260 | struct e1000_adapter *adapter = hw->back; | 4261 | struct e1000_adapter *adapter = hw->back; |
4261 | int ret_val = pci_set_mwi(adapter->pdev); | 4262 | int ret_val = pci_set_mwi(adapter->pdev); |
4262 | 4263 | ||
4263 | if(ret_val) | 4264 | if (ret_val) |
4264 | DPRINTK(PROBE, ERR, "Error in setting MWI\n"); | 4265 | DPRINTK(PROBE, ERR, "Error in setting MWI\n"); |
4265 | } | 4266 | } |
4266 | 4267 | ||
@@ -4309,7 +4310,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
4309 | e1000_irq_disable(adapter); | 4310 | e1000_irq_disable(adapter); |
4310 | adapter->vlgrp = grp; | 4311 | adapter->vlgrp = grp; |
4311 | 4312 | ||
4312 | if(grp) { | 4313 | if (grp) { |
4313 | /* enable VLAN tag insert/strip */ | 4314 | /* enable VLAN tag insert/strip */ |
4314 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 4315 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); |
4315 | ctrl |= E1000_CTRL_VME; | 4316 | ctrl |= E1000_CTRL_VME; |
@@ -4331,7 +4332,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
4331 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4332 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
4332 | rctl &= ~E1000_RCTL_VFE; | 4333 | rctl &= ~E1000_RCTL_VFE; |
4333 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 4334 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); |
4334 | if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { | 4335 | if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { |
4335 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | 4336 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); |
4336 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 4337 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
4337 | } | 4338 | } |
@@ -4345,9 +4346,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) | |||
4345 | { | 4346 | { |
4346 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4347 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4347 | uint32_t vfta, index; | 4348 | uint32_t vfta, index; |
4348 | if((adapter->hw.mng_cookie.status & | 4349 | |
4349 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 4350 | if ((adapter->hw.mng_cookie.status & |
4350 | (vid == adapter->mng_vlan_id)) | 4351 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
4352 | (vid == adapter->mng_vlan_id)) | ||
4351 | return; | 4353 | return; |
4352 | /* add VID to filter table */ | 4354 | /* add VID to filter table */ |
4353 | index = (vid >> 5) & 0x7F; | 4355 | index = (vid >> 5) & 0x7F; |
@@ -4364,13 +4366,13 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) | |||
4364 | 4366 | ||
4365 | e1000_irq_disable(adapter); | 4367 | e1000_irq_disable(adapter); |
4366 | 4368 | ||
4367 | if(adapter->vlgrp) | 4369 | if (adapter->vlgrp) |
4368 | adapter->vlgrp->vlan_devices[vid] = NULL; | 4370 | adapter->vlgrp->vlan_devices[vid] = NULL; |
4369 | 4371 | ||
4370 | e1000_irq_enable(adapter); | 4372 | e1000_irq_enable(adapter); |
4371 | 4373 | ||
4372 | if((adapter->hw.mng_cookie.status & | 4374 | if ((adapter->hw.mng_cookie.status & |
4373 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 4375 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
4374 | (vid == adapter->mng_vlan_id)) { | 4376 | (vid == adapter->mng_vlan_id)) { |
4375 | /* release control to f/w */ | 4377 | /* release control to f/w */ |
4376 | e1000_release_hw_control(adapter); | 4378 | e1000_release_hw_control(adapter); |
@@ -4389,10 +4391,10 @@ e1000_restore_vlan(struct e1000_adapter *adapter) | |||
4389 | { | 4391 | { |
4390 | e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); | 4392 | e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); |
4391 | 4393 | ||
4392 | if(adapter->vlgrp) { | 4394 | if (adapter->vlgrp) { |
4393 | uint16_t vid; | 4395 | uint16_t vid; |
4394 | for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | 4396 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { |
4395 | if(!adapter->vlgrp->vlan_devices[vid]) | 4397 | if (!adapter->vlgrp->vlan_devices[vid]) |
4396 | continue; | 4398 | continue; |
4397 | e1000_vlan_rx_add_vid(adapter->netdev, vid); | 4399 | e1000_vlan_rx_add_vid(adapter->netdev, vid); |
4398 | } | 4400 | } |
@@ -4405,13 +4407,13 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx) | |||
4405 | adapter->hw.autoneg = 0; | 4407 | adapter->hw.autoneg = 0; |
4406 | 4408 | ||
4407 | /* Fiber NICs only allow 1000 gbps Full duplex */ | 4409 | /* Fiber NICs only allow 1000 gbps Full duplex */ |
4408 | if((adapter->hw.media_type == e1000_media_type_fiber) && | 4410 | if ((adapter->hw.media_type == e1000_media_type_fiber) && |
4409 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { | 4411 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { |
4410 | DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); | 4412 | DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); |
4411 | return -EINVAL; | 4413 | return -EINVAL; |
4412 | } | 4414 | } |
4413 | 4415 | ||
4414 | switch(spddplx) { | 4416 | switch (spddplx) { |
4415 | case SPEED_10 + DUPLEX_HALF: | 4417 | case SPEED_10 + DUPLEX_HALF: |
4416 | adapter->hw.forced_speed_duplex = e1000_10_half; | 4418 | adapter->hw.forced_speed_duplex = e1000_10_half; |
4417 | break; | 4419 | break; |
@@ -4496,7 +4498,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4496 | 4498 | ||
4497 | netif_device_detach(netdev); | 4499 | netif_device_detach(netdev); |
4498 | 4500 | ||
4499 | if(netif_running(netdev)) | 4501 | if (netif_running(netdev)) |
4500 | e1000_down(adapter); | 4502 | e1000_down(adapter); |
4501 | 4503 | ||
4502 | #ifdef CONFIG_PM | 4504 | #ifdef CONFIG_PM |
@@ -4508,21 +4510,21 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4508 | #endif | 4510 | #endif |
4509 | 4511 | ||
4510 | status = E1000_READ_REG(&adapter->hw, STATUS); | 4512 | status = E1000_READ_REG(&adapter->hw, STATUS); |
4511 | if(status & E1000_STATUS_LU) | 4513 | if (status & E1000_STATUS_LU) |
4512 | wufc &= ~E1000_WUFC_LNKC; | 4514 | wufc &= ~E1000_WUFC_LNKC; |
4513 | 4515 | ||
4514 | if(wufc) { | 4516 | if (wufc) { |
4515 | e1000_setup_rctl(adapter); | 4517 | e1000_setup_rctl(adapter); |
4516 | e1000_set_multi(netdev); | 4518 | e1000_set_multi(netdev); |
4517 | 4519 | ||
4518 | /* turn on all-multi mode if wake on multicast is enabled */ | 4520 | /* turn on all-multi mode if wake on multicast is enabled */ |
4519 | if(adapter->wol & E1000_WUFC_MC) { | 4521 | if (adapter->wol & E1000_WUFC_MC) { |
4520 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4522 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
4521 | rctl |= E1000_RCTL_MPE; | 4523 | rctl |= E1000_RCTL_MPE; |
4522 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 4524 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); |
4523 | } | 4525 | } |
4524 | 4526 | ||
4525 | if(adapter->hw.mac_type >= e1000_82540) { | 4527 | if (adapter->hw.mac_type >= e1000_82540) { |
4526 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 4528 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); |
4527 | /* advertise wake from D3Cold */ | 4529 | /* advertise wake from D3Cold */ |
4528 | #define E1000_CTRL_ADVD3WUC 0x00100000 | 4530 | #define E1000_CTRL_ADVD3WUC 0x00100000 |
@@ -4533,7 +4535,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4533 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4535 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); |
4534 | } | 4536 | } |
4535 | 4537 | ||
4536 | if(adapter->hw.media_type == e1000_media_type_fiber || | 4538 | if (adapter->hw.media_type == e1000_media_type_fiber || |
4537 | adapter->hw.media_type == e1000_media_type_internal_serdes) { | 4539 | adapter->hw.media_type == e1000_media_type_internal_serdes) { |
4538 | /* keep the laser running in D3 */ | 4540 | /* keep the laser running in D3 */ |
4539 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 4541 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); |
@@ -4563,10 +4565,10 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4563 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | 4565 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); |
4564 | } | 4566 | } |
4565 | 4567 | ||
4566 | if(adapter->hw.mac_type >= e1000_82540 && | 4568 | if (adapter->hw.mac_type >= e1000_82540 && |
4567 | adapter->hw.media_type == e1000_media_type_copper) { | 4569 | adapter->hw.media_type == e1000_media_type_copper) { |
4568 | manc = E1000_READ_REG(&adapter->hw, MANC); | 4570 | manc = E1000_READ_REG(&adapter->hw, MANC); |
4569 | if(manc & E1000_MANC_SMBUS_EN) { | 4571 | if (manc & E1000_MANC_SMBUS_EN) { |
4570 | manc |= E1000_MANC_ARP_EN; | 4572 | manc |= E1000_MANC_ARP_EN; |
4571 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 4573 | E1000_WRITE_REG(&adapter->hw, MANC, manc); |
4572 | retval = pci_enable_wake(pdev, PCI_D3hot, 1); | 4574 | retval = pci_enable_wake(pdev, PCI_D3hot, 1); |
@@ -4617,12 +4619,12 @@ e1000_resume(struct pci_dev *pdev) | |||
4617 | e1000_reset(adapter); | 4619 | e1000_reset(adapter); |
4618 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); | 4620 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); |
4619 | 4621 | ||
4620 | if(netif_running(netdev)) | 4622 | if (netif_running(netdev)) |
4621 | e1000_up(adapter); | 4623 | e1000_up(adapter); |
4622 | 4624 | ||
4623 | netif_device_attach(netdev); | 4625 | netif_device_attach(netdev); |
4624 | 4626 | ||
4625 | if(adapter->hw.mac_type >= e1000_82540 && | 4627 | if (adapter->hw.mac_type >= e1000_82540 && |
4626 | adapter->hw.media_type == e1000_media_type_copper) { | 4628 | adapter->hw.media_type == e1000_media_type_copper) { |
4627 | manc = E1000_READ_REG(&adapter->hw, MANC); | 4629 | manc = E1000_READ_REG(&adapter->hw, MANC); |
4628 | manc &= ~(E1000_MANC_ARP_EN); | 4630 | manc &= ~(E1000_MANC_ARP_EN); |